diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..3ce8ff23 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "jedi/third_party/typeshed"] + path = jedi/third_party/typeshed + url = https://github.com/davidhalter/typeshed.git diff --git a/.travis.yml b/.travis.yml index 83dcad01..eb9aa876 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,6 @@ python: env: - JEDI_TEST_ENVIRONMENT=27 - - JEDI_TEST_ENVIRONMENT=33 - JEDI_TEST_ENVIRONMENT=34 - JEDI_TEST_ENVIRONMENT=35 - JEDI_TEST_ENVIRONMENT=36 @@ -23,7 +22,6 @@ matrix: allow_failures: - python: pypy - env: TOXENV=sith - - python: 3.7-dev include: - python: 3.6 env: @@ -34,12 +32,13 @@ matrix: # For now ignore pypy, there are so many issues that we don't really need # to run it. #- python: pypy - - python: "3.7-dev" + - python: "nightly" + env: + - JEDI_TEST_ENVIRONMENT=36 before_install: - ./travis_install.sh # Need to add the path to the Python versions in the end. This might add # something twice, but it doesn't really matter, because they are appended. - - export PATH=$PATH:/opt/python/3.3/bin - export PATH=$PATH:/opt/python/3.5/bin # 3.6 was not installed manually, but already is on the system. However # it's not on path (unless 3.6 is selected). diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6c1794e5..151a0a91 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,33 @@ Changelog --------- +0.13.3 (2019-02-24) ++++++++++++++++++++ + +- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870 + +0.13.2 (2018-12-15) ++++++++++++++++++++ + +- Fixed a bug that led to Jedi spawning a lot of subprocesses. + +0.13.1 (2018-10-02) ++++++++++++++++++++ + +- Bugfixes, because tensorflow completions were still slow. + +0.13.0 (2018-10-02) ++++++++++++++++++++ + +- A small release. Some bug fixes. +- Remove Python 3.3 support. Python 3.3 support has been dropped by the Python + foundation. +- Default environments are now using the same Python version as the Python + process. In 0.12.x, we used to load the latest Python version on the system. +- Added ``include_builtins`` as a parameter to usages. +- ``goto_assignments`` has a new ``follow_builtin_imports`` parameter that + changes the previous behavior slightly. + 0.12.1 (2018-06-30) +++++++++++++++++++ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 609e6395..d791bae9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,4 +5,4 @@ Pull Requests are great. 3. Add your name to AUTHORS.txt 4. Push to your fork and submit a pull request. -**Try to use the PEP8 style guide.** +**Try to use the PEP8 style guide** (and it's ok to have a line length of 100 characters). diff --git a/MANIFEST.in b/MANIFEST.in index 5ba58e6c..94869628 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -8,8 +8,10 @@ include conftest.py include pytest.ini include tox.ini include requirements.txt -include jedi/evaluate/compiled/fake/*.pym include jedi/parser/python/grammar*.txt +recursive-include jedi/third_party *.pyi +include jedi/third_party/typeshed/LICENSE +include jedi/third_party/typeshed/README recursive-include test * recursive-include docs * recursive-exclude * *.pyc diff --git a/README.rst b/README.rst index 12015d13..64c7f903 100644 --- a/README.rst +++ b/README.rst @@ -111,8 +111,8 @@ understands, see: `Features `_. A list of caveats can be found on the same page. -You can run Jedi on CPython 2.7 or 3.3+ but it should also -understand/parse code older than those versions. 
Additonally you should be able +You can run Jedi on CPython 2.7 or 3.4+ but it should also +understand/parse code older than those versions. Additionally you should be able to use `Virtualenvs `_ very well. @@ -122,7 +122,7 @@ Tips on how to use Jedi efficiently can be found `here API --- -You can find the documentation for the `API here `_. +You can find the documentation for the `API here `_. Autocompletion / Goto / Pydoc diff --git a/appveyor.yml b/appveyor.yml index e4748115..c1a246e0 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -3,9 +3,6 @@ environment: - TOXENV: py27 PYTHON_PATH: C:\Python27 JEDI_TEST_ENVIRONMENT: 27 - - TOXENV: py27 - PYTHON_PATH: C:\Python27 - JEDI_TEST_ENVIRONMENT: 33 - TOXENV: py27 PYTHON_PATH: C:\Python27 JEDI_TEST_ENVIRONMENT: 34 @@ -15,29 +12,13 @@ environment: - TOXENV: py27 PYTHON_PATH: C:\Python27 JEDI_TEST_ENVIRONMENT: 36 - - - TOXENV: py33 - PYTHON_PATH: C:\Python33 - JEDI_TEST_ENVIRONMENT: 27 - - TOXENV: py33 - PYTHON_PATH: C:\Python33 - JEDI_TEST_ENVIRONMENT: 33 - - TOXENV: py33 - PYTHON_PATH: C:\Python33 - JEDI_TEST_ENVIRONMENT: 34 - - TOXENV: py33 - PYTHON_PATH: C:\Python33 - JEDI_TEST_ENVIRONMENT: 35 - - TOXENV: py33 - PYTHON_PATH: C:\Python33 - JEDI_TEST_ENVIRONMENT: 36 + - TOXENV: py27 + PYTHON_PATH: C:\Python27 + JEDI_TEST_ENVIRONMENT: 37 - TOXENV: py34 PYTHON_PATH: C:\Python34 JEDI_TEST_ENVIRONMENT: 27 - - TOXENV: py34 - PYTHON_PATH: C:\Python34 - JEDI_TEST_ENVIRONMENT: 33 - TOXENV: py34 PYTHON_PATH: C:\Python34 JEDI_TEST_ENVIRONMENT: 34 @@ -47,13 +28,13 @@ environment: - TOXENV: py34 PYTHON_PATH: C:\Python34 JEDI_TEST_ENVIRONMENT: 36 + - TOXENV: py34 + PYTHON_PATH: C:\Python34 + JEDI_TEST_ENVIRONMENT: 37 - TOXENV: py35 PYTHON_PATH: C:\Python35 JEDI_TEST_ENVIRONMENT: 27 - - TOXENV: py35 - PYTHON_PATH: C:\Python35 - JEDI_TEST_ENVIRONMENT: 33 - TOXENV: py35 PYTHON_PATH: C:\Python35 JEDI_TEST_ENVIRONMENT: 34 @@ -63,13 +44,13 @@ environment: - TOXENV: py35 PYTHON_PATH: C:\Python35 JEDI_TEST_ENVIRONMENT: 36 + - TOXENV: py35 + PYTHON_PATH: C:\Python35 + JEDI_TEST_ENVIRONMENT: 37 - TOXENV: py36 PYTHON_PATH: C:\Python36 JEDI_TEST_ENVIRONMENT: 27 - - TOXENV: py36 - PYTHON_PATH: C:\Python36 - JEDI_TEST_ENVIRONMENT: 33 - TOXENV: py36 PYTHON_PATH: C:\Python36 JEDI_TEST_ENVIRONMENT: 34 @@ -79,7 +60,27 @@ environment: - TOXENV: py36 PYTHON_PATH: C:\Python36 JEDI_TEST_ENVIRONMENT: 36 + - TOXENV: py36 + PYTHON_PATH: C:\Python36 + JEDI_TEST_ENVIRONMENT: 37 + + - TOXENV: py37 + PYTHON_PATH: C:\Python37 + JEDI_TEST_ENVIRONMENT: 27 + - TOXENV: py37 + PYTHON_PATH: C:\Python37 + JEDI_TEST_ENVIRONMENT: 34 + - TOXENV: py37 + PYTHON_PATH: C:\Python37 + JEDI_TEST_ENVIRONMENT: 35 + - TOXENV: py37 + PYTHON_PATH: C:\Python37 + JEDI_TEST_ENVIRONMENT: 36 + - TOXENV: py37 + PYTHON_PATH: C:\Python37 + JEDI_TEST_ENVIRONMENT: 37 install: + - git submodule update --init --recursive - set PATH=%PYTHON_PATH%;%PYTHON_PATH%\Scripts;%PATH% - pip install tox build_script: diff --git a/conftest.py b/conftest.py index 500799ec..20d6fd68 100644 --- a/conftest.py +++ b/conftest.py @@ -6,7 +6,7 @@ from functools import partial import pytest import jedi -from jedi.api.environment import get_default_environment, get_system_environment +from jedi.api.environment import get_system_environment, InterpreterEnvironment from jedi._compatibility import py_version collect_ignore = [ @@ -41,6 +41,9 @@ def pytest_addoption(parser): parser.addoption("--env", action='store', help="Execute the tests in that environment (e.g. 
35 for python3.5).") + parser.addoption("--interpreter-env", "-I", action='store_true', + help="Don't use subprocesses to guarantee having safe " + "code execution. Useful for debugging.") def pytest_configure(config): @@ -87,13 +90,13 @@ def clean_jedi_cache(request): @pytest.fixture(scope='session') def environment(request): + if request.config.option.interpreter_env: + return InterpreterEnvironment() + version = request.config.option.env if version is None: version = os.environ.get('JEDI_TEST_ENVIRONMENT', str(py_version)) - if int(version) == py_version: - return get_default_environment() - return get_system_environment(version[0] + '.' + version[1:]) @@ -116,3 +119,11 @@ def has_typing(environment): @pytest.fixture(scope='session') def jedi_path(): return os.path.dirname(__file__) + + +@pytest.fixture() +def skip_python2(environment): + if environment.version_info.major == 2: + # This if is just needed to avoid that tests ever skip way more than + # they should for all Python versions. + pytest.skip() diff --git a/deploy-master.sh b/deploy-master.sh index 66a290d6..eadddfe2 100755 --- a/deploy-master.sh +++ b/deploy-master.sh @@ -21,6 +21,7 @@ rm -rf $PROJECT_NAME git clone .. $PROJECT_NAME cd $PROJECT_NAME git checkout $BRANCH +git submodule update --init # Test first. tox diff --git a/docs/docs/features.rst b/docs/docs/features.rst index 9a3cb052..d474b591 100644 --- a/docs/docs/features.rst +++ b/docs/docs/features.rst @@ -20,7 +20,7 @@ make it work. General Features ---------------- -- Python 2.7 and 3.3+ support +- Python 2.7 and 3.4+ support - Ignores syntax errors and wrong indentation - Can deal with complex module / function / class structures - Great Virtualenv support diff --git a/jedi/__init__.py b/jedi/__init__.py index 30b19216..215f13f5 100644 --- a/jedi/__init__.py +++ b/jedi/__init__.py @@ -19,24 +19,24 @@ example for the autocompletion feature: >>> import jedi >>> source = ''' -... import datetime -... datetime.da''' ->>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') +... import json +... json.lo''' +>>> script = jedi.Script(source, 3, len('json.lo'), 'example.py') >>> script - + >>> completions = script.completions() ->>> completions #doctest: +ELLIPSIS -[, , ...] +>>> completions +[, ] >>> print(completions[0].complete) -te +ad >>> print(completions[0].name) -date +load As you see Jedi is pretty simple and allows you to concentrate on writing a good text editor, while still having very good IDE features for Python. """ -__version__ = '0.12.1' +__version__ = '0.14.0' from jedi.api import Script, Interpreter, set_debug_function, \ preload_module, names diff --git a/jedi/_compatibility.py b/jedi/_compatibility.py index 2002a1ca..8ce0e685 100644 --- a/jedi/_compatibility.py +++ b/jedi/_compatibility.py @@ -2,6 +2,7 @@ To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been created. Clearly there is huge need to use conforming syntax. 
""" +from __future__ import print_function import errno import sys import os @@ -14,10 +15,11 @@ try: import importlib except ImportError: pass +from zipimport import zipimporter + +from parso.file_io import KnownContentFileIO is_py3 = sys.version_info[0] >= 3 -is_py33 = is_py3 and sys.version_info[1] >= 3 -is_py34 = is_py3 and sys.version_info[1] >= 4 is_py35 = is_py3 and sys.version_info[1] >= 5 py_version = int(str(sys.version_info[0]) + str(sys.version_info[1])) @@ -34,24 +36,36 @@ class DummyFile(object): del self.loader -def find_module_py34(string, path=None, full_name=None): +def find_module_py34(string, path=None, full_name=None, is_global_search=True): spec = None loader = None - spec = importlib.machinery.PathFinder.find_spec(string, path) - if spec is not None: - # We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs - if not spec.has_location: - full_name = string if not path else full_name - implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path) - return None, implicit_ns_info, False + for finder in sys.meta_path: + if is_global_search and finder != importlib.machinery.PathFinder: + p = None + else: + p = path + try: + find_spec = finder.find_spec + except AttributeError: + # These are old-school clases that still have a different API, just + # ignore those. + continue + + spec = find_spec(string, p) + if spec is not None: + loader = spec.loader + if loader is None and not spec.has_location: + # This is a namespace package. + full_name = string if not path else full_name + implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path) + return implicit_ns_info, True + break - # we have found the tail end of the dotted path - loader = spec.loader return find_module_py33(string, path, loader) -def find_module_py33(string, path=None, loader=None, full_name=None): +def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True): loader = loader or importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins @@ -71,47 +85,91 @@ def find_module_py33(string, path=None, loader=None, full_name=None): if loader is None: raise ImportError("Couldn't find a loader for {}".format(string)) + return _from_loader(loader, string) + + +class ZipFileIO(KnownContentFileIO): + """For .zip and .egg archives""" + def __init__(self, path, code, zip_path): + super(ZipFileIO, self).__init__(path, code) + self._zip_path = zip_path + + def get_last_modified(self): + return os.path.getmtime(self._zip_path) + + +def _from_loader(loader, string): + is_package = loader.is_package(string) try: - is_package = loader.is_package(string) - if is_package: - if hasattr(loader, 'path'): - module_path = os.path.dirname(loader.path) - else: - # At least zipimporter does not have path attribute - module_path = os.path.dirname(loader.get_filename(string)) - if hasattr(loader, 'archive'): - module_file = DummyFile(loader, string) - else: - module_file = None - else: - module_path = loader.get_filename(string) - module_file = DummyFile(loader, string) + get_filename = loader.get_filename except AttributeError: - # ExtensionLoader has not attribute get_filename, instead it has a - # path attribute that we can use to retrieve the module path - try: - module_path = loader.path - module_file = DummyFile(loader, string) - except AttributeError: - module_path = string - module_file = None - finally: - is_package = False + return None, is_package + else: + 
module_path = cast_path(get_filename(string)) - if hasattr(loader, 'archive'): - module_path = loader.archive + # To avoid unicode and read bytes, "overwrite" loader.get_source if + # possible. + f = type(loader).get_source + if is_py3 and f is not importlib.machinery.SourceFileLoader.get_source: + # Unfortunately we are reading unicode here, not bytes. + # It seems hard to get bytes, because the zip importer + # logic just unpacks the zip file and returns a file descriptor + # that we cannot as easily access. Therefore we just read it as + # a string in the cases where get_source was overwritten. + code = loader.get_source(string) + else: + code = _get_source(loader, string) - return module_file, module_path, is_package + if code is None: + return None, is_package + if isinstance(loader, zipimporter): + return ZipFileIO(module_path, code, cast_path(loader.archive)), is_package + + return KnownContentFileIO(module_path, code), is_package -def find_module_pre_py33(string, path=None, full_name=None): +def _get_source(loader, fullname): + """ + This method is here as a replacement for SourceLoader.get_source. That + method returns unicode, but we prefer bytes. + """ + path = loader.get_filename(fullname) + try: + return loader.get_data(path) + except OSError: + raise ImportError('source not available through get_data()', + name=fullname) + + +def find_module_pre_py3(string, path=None, full_name=None, is_global_search=True): # This import is here, because in other places it will raise a # DeprecationWarning. import imp try: module_file, module_path, description = imp.find_module(string, path) module_type = description[2] - return module_file, module_path, module_type is imp.PKG_DIRECTORY + is_package = module_type is imp.PKG_DIRECTORY + if is_package: + # In Python 2 directory package imports are returned as folder + # paths, not __init__.py paths. + p = os.path.join(module_path, '__init__.py') + try: + module_file = open(p) + module_path = p + except FileNotFoundError: + pass + elif module_type != imp.PY_SOURCE: + if module_file is not None: + module_file.close() + module_file = None + + if module_file is None: + code = None + return None, is_package + + with module_file: + code = module_file.read() + return KnownContentFileIO(cast_path(module_path), code), is_package except ImportError: pass @@ -120,27 +178,13 @@ def find_module_pre_py33(string, path=None, full_name=None): for item in path: loader = pkgutil.get_importer(item) if loader: - try: - loader = loader.find_module(string) - if loader: - is_package = loader.is_package(string) - is_archive = hasattr(loader, 'archive') - module_path = loader.get_filename(string) - if is_package: - module_path = os.path.dirname(module_path) - if is_archive: - module_path = loader.archive - file = None - if not is_package or is_archive: - file = DummyFile(loader, string) - return file, module_path, is_package - except ImportError: - pass + loader = loader.find_module(string) + if loader is not None: + return _from_loader(loader, string) raise ImportError("No module named {}".format(string)) -find_module = find_module_py33 if is_py33 else find_module_pre_py33 -find_module = find_module_py34 if is_py34 else find_module +find_module = find_module_py34 if is_py3 else find_module_pre_py3 find_module.__doc__ = """ Provides information about a module. 
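The Python 3 branch above asks the standard finder/loader machinery for everything it needs. A rough standalone illustration of that protocol (plain stdlib, not Jedi's API; the module name is arbitrary)::

    import importlib.machinery

    # PathFinder is the sys.path based finder consulted above.
    spec = importlib.machinery.PathFinder.find_spec('json')
    if spec is not None:
        loader = spec.loader
        print(loader.is_package('json'))    # True, json is a package
        print(loader.get_filename('json'))  # .../json/__init__.py
        print(loader.get_source('json') is not None)  # source as unicode

This is roughly what ``_from_loader`` condenses into a ``KnownContentFileIO`` (path plus code) and an ``is_package`` flag.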
@@ -207,6 +251,7 @@ def _iter_modules(paths, prefix=''): yield importer, prefix + modname, ispkg # END COPY + iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules @@ -252,6 +297,7 @@ Usage:: """ + class Python3Method(object): def __init__(self, func): self.func = func @@ -312,10 +358,10 @@ def force_unicode(obj): try: import builtins # module name in python 3 except ImportError: - import __builtin__ as builtins + import __builtin__ as builtins # noqa: F401 -import ast +import ast # noqa: F401 def literal_eval(string): @@ -325,7 +371,7 @@ def literal_eval(string): try: from itertools import zip_longest except ImportError: - from itertools import izip_longest as zip_longest # Python 2 + from itertools import izip_longest as zip_longest # Python 2 # noqa: F401 try: FileNotFoundError = FileNotFoundError @@ -337,6 +383,11 @@ try: except NameError: NotADirectoryError = IOError +try: + PermissionError = PermissionError +except NameError: + PermissionError = IOError + def no_unicode_pprint(dct): """ @@ -350,13 +401,6 @@ def no_unicode_pprint(dct): print(re.sub("u'", "'", s)) -def print_to_stderr(*args): - if is_py3: - eval("print(*args, file=sys.stderr)") - else: - print >> sys.stderr, args - - def utf8_repr(func): """ ``__repr__`` methods in Python 2 don't allow unicode objects to be @@ -378,7 +422,7 @@ def utf8_repr(func): if is_py3: import queue else: - import Queue as queue + import Queue as queue # noqa: F401 try: # Attempt to load the C implementation of pickle on Python 2 as it is way @@ -459,8 +503,24 @@ def pickle_load(file): raise +def _python2_dct_keys_to_unicode(data): + """ + Python 2 stores object __dict__ entries as bytes, not unicode, correct it + here. Python 2 can deal with both, Python 3 expects unicode. + """ + if isinstance(data, tuple): + return tuple(_python2_dct_keys_to_unicode(x) for x in data) + elif isinstance(data, list): + return list(_python2_dct_keys_to_unicode(x) for x in data) + elif hasattr(data, '__dict__') and type(data.__dict__) == dict: + data.__dict__ = {unicode(k): v for k, v in data.__dict__.items()} + return data + + def pickle_dump(data, file, protocol): try: + if not is_py3: + data = _python2_dct_keys_to_unicode(data) pickle.dump(data, file, protocol) # On Python 3.3 flush throws sometimes an error even though the writing # operation should be completed. @@ -507,6 +567,9 @@ class GeneralizedPopen(subprocess.Popen): except AttributeError: CREATE_NO_WINDOW = 0x08000000 kwargs['creationflags'] = CREATE_NO_WINDOW + # The child process doesn't need file descriptors except 0, 1, 2. + # This is unix only. + kwargs['close_fds'] = 'posix' in sys.builtin_module_names super(GeneralizedPopen, self).__init__(*args, **kwargs) @@ -544,7 +607,7 @@ def which(cmd, mode=os.F_OK | os.X_OK, path=None): if sys.platform == "win32": # The current directory takes precedence on Windows. - if not os.curdir in path: + if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. @@ -565,7 +628,7 @@ def which(cmd, mode=os.F_OK | os.X_OK, path=None): seen = set() for dir in path: normdir = os.path.normcase(dir) - if not normdir in seen: + if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) diff --git a/jedi/api/__init__.py b/jedi/api/__init__.py index 3eacee63..615d2248 100644 --- a/jedi/api/__init__.py +++ b/jedi/api/__init__.py @@ -11,11 +11,12 @@ arguments. 
""" import os import sys +import warnings import parso from parso.python import tree -from jedi._compatibility import force_unicode, is_py3 +from jedi._compatibility import force_unicode, cast_path, is_py3 from jedi.parser_utils import get_executable_nodes from jedi import debug from jedi import settings @@ -31,11 +32,15 @@ from jedi.evaluate import imports from jedi.evaluate import usages from jedi.evaluate.arguments import try_iter_content from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf -from jedi.evaluate.sys_path import dotted_path_in_sys_path -from jedi.evaluate.filters import TreeNameDefinition, ParamName +from jedi.evaluate.sys_path import transform_path_to_dotted +from jedi.evaluate.names import TreeNameDefinition, ParamName from jedi.evaluate.syntax_tree import tree_name_to_contexts from jedi.evaluate.context import ModuleContext +from jedi.evaluate.base_context import ContextSet from jedi.evaluate.context.iterable import unpack_tuple_to_dict +from jedi.evaluate.gradual.conversion import try_stubs_to_actual_context_set, \ + try_stub_to_actual_names +from jedi.evaluate.gradual.utils import load_proper_stub_module # Jedi uses lots and lots of recursion. By setting this a little bit higher, we # can remove some "maximum recursion depth" errors. @@ -74,16 +79,14 @@ class Script(object): :param encoding: The encoding of ``source``, if it is not a ``unicode`` object (default ``'utf-8'``). :type encoding: str - :param source_encoding: The encoding of ``source``, if it is not a - ``unicode`` object (default ``'utf-8'``). - :type encoding: str :param sys_path: ``sys.path`` to use during analysis of the script :type sys_path: list :param environment: TODO - :type sys_path: Environment + :type environment: Environment """ def __init__(self, source=None, line=None, column=None, path=None, - encoding='utf-8', sys_path=None, environment=None): + encoding='utf-8', sys_path=None, environment=None, + _project=None): self._orig_path = path # An empty path (also empty string) should always result in no path. self.path = os.path.abspath(path) if path else None @@ -99,24 +102,27 @@ class Script(object): if sys_path is not None and not is_py3: sys_path = list(map(force_unicode, sys_path)) - # Load the Python grammar of the current interpreter. - project = get_default_project( - os.path.dirname(self.path)if path else os.getcwd() - ) + project = _project + if project is None: + # Load the Python grammar of the current interpreter. + project = get_default_project( + os.path.dirname(self.path)if path else os.getcwd() + ) # TODO deprecate and remove sys_path from the Script API. if sys_path is not None: project._sys_path = sys_path self._evaluator = Evaluator( project, environment=environment, script_path=self.path ) - self._project = project debug.speed('init') self._module_node, source = self._evaluator.parse_and_get_code( code=source, path=self.path, + encoding=encoding, + use_latest_grammar=path and path.endswith('.pyi'), cache=False, # No disk cache, because the current script often changes. 
- diff_cache=True, - cache_path=settings.cache_directory + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, ) debug.speed('parsed') self._code_lines = parso.split_lines(source, keepends=True) @@ -134,29 +140,64 @@ class Script(object): column = line_len if column is None else column if not (0 <= column <= line_len): - raise ValueError('`column` parameter is not in a valid range.') + raise ValueError('`column` parameter (%d) is not in a valid range ' + '(0-%d) for line %d (%r).' % ( + column, line_len, line, line_string)) self._pos = line, column self._path = path cache.clear_time_caches() debug.reset_time() + # Cache the module, this is mostly useful for testing, since this shouldn't + # be called multiple times. + @cache.memoize_method def _get_module(self): - name = '__main__' + names = None + is_package = False if self.path is not None: - n = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path) - if n is not None: - name = n + import_names, is_p = transform_path_to_dotted( + self._evaluator.get_sys_path(add_parent_paths=False), + self.path + ) + if import_names is not None: + names = import_names + is_package = is_p + + if self.path is not None and self.path.endswith('.pyi'): + # We are in a stub file. Try to load the stub properly. + stub_module = load_proper_stub_module( + self._evaluator, + cast_path(self.path), + names, + self._module_node + ) + if stub_module is not None: + return stub_module + + if names is None: + names = ('__main__',) module = ModuleContext( - self._evaluator, self._module_node, self.path, - code_lines=self._code_lines + self._evaluator, self._module_node, cast_path(self.path), + string_names=names, + code_lines=self._code_lines, + is_package=is_package, ) - imports.add_module_to_cache(self._evaluator, name, module) + #module, = try_to_merge_with_stub( + # self._evaluator, None, module.string_names, ContextSet([module]) + #) + if names[0] not in ('builtins', '__builtin__', 'typing'): + # These modules are essential for Jedi, so don't overwrite them. + self._evaluator.module_cache.add(names, ContextSet([module])) return module def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path)) + return '<%s: %s %r>' % ( + self.__class__.__name__, + repr(self._orig_path), + self._evaluator.environment, + ) def completions(self): """ @@ -172,6 +213,24 @@ class Script(object): self._pos, self.call_signatures ) completions = completion.completions() + + def iter_import_completions(): + for c in completions: + tree_name = c._name.tree_name + if tree_name is None: + continue + definition = tree_name.get_definition() + if definition is not None \ + and definition.type in ('import_name', 'import_from'): + yield c + + if len(list(iter_import_completions())) > 10: + # For now disable completions if there's a lot of imports that + # might potentially be resolved. This is the case for tensorflow + # and has been fixed for it. This is obviously temporary until we + # have a better solution. + self._evaluator.infer_enabled = False + debug.speed('completions end') return completions @@ -203,42 +262,52 @@ class Script(object): # the API. return helpers.sorted_definitions(set(defs)) - def goto_assignments(self, follow_imports=False): + def goto_assignments(self, follow_imports=False, follow_builtin_imports=False): """ Return the first definition found, while optionally following imports. 
Multiple objects may be returned, because Python itself is a dynamic language, which means depending on an option you can have two different versions of a function. + :param follow_imports: The goto call will follow imports. + :param follow_builtin_imports: If follow_imports is True, this decides + whether it also follows builtin imports. :rtype: list of :class:`classes.Definition` """ def filter_follow_imports(names, check): for name in names: if check(name): - for result in filter_follow_imports(name.goto(), check): - yield result + new_names = list(filter_follow_imports(name.goto(), check)) + found_builtin = False + if follow_builtin_imports: + for new_name in new_names: + if new_name.start_pos is None: + found_builtin = True + + if found_builtin: + yield name + else: + for new_name in new_names: + yield new_name else: yield name tree_name = self._module_node.get_name_of_position(self._pos) if tree_name is None: - return [] + # Without a name we really just want to jump to the result, e.g. what is + # executed by `foo()`, if the cursor is after `)`. + return self.goto_definitions() context = self._evaluator.create_context(self._get_module(), tree_name) names = list(self._evaluator.goto(context, tree_name)) if follow_imports: - def check(name): - return name.is_import() - else: - def check(name): - return isinstance(name, imports.SubModuleName) - - names = filter_follow_imports(names, check) + names = filter_follow_imports(names, lambda name: name.is_import()) + names = try_stub_to_actual_names(names, prefer_stub_to_compiled=True) defs = [classes.Definition(self._evaluator, d) for d in set(names)] return helpers.sorted_definitions(defs) - def usages(self, additional_module_paths=()): + def usages(self, additional_module_paths=(), **kwargs): """ Return :class:`classes.Definition` objects, which contain all names that point to the definition of the name under the cursor. This @@ -247,17 +316,31 @@ class Script(object): .. todo:: Implement additional_module_paths + :param additional_module_paths: Deprecated, never ever worked. + :param include_builtins: Default True. If False, definitions that live in + builtin modules (e.g. ``sys``) are not returned. :rtype: list of :class:`classes.Definition` """ - tree_name = self._module_node.get_name_of_position(self._pos) - if tree_name is None: - # Must be syntax - return [] + if additional_module_paths: + warnings.warn( + "Deprecated since version 0.12.0. This never even worked, just ignore it.", + DeprecationWarning, + stacklevel=2 + ) - names = usages.usages(self._get_module(), tree_name) + def _usages(include_builtins=True): + tree_name = self._module_node.get_name_of_position(self._pos) + if tree_name is None: + # Must be syntax + return [] - definitions = [classes.Definition(self._evaluator, n) for n in names] - return helpers.sorted_definitions(definitions) + names = usages.usages(self._get_module(), tree_name) + + definitions = [classes.Definition(self._evaluator, n) for n in names] + if not include_builtins: + definitions = [d for d in definitions if not d.in_builtin_module()] + return helpers.sorted_definitions(definitions) + return _usages(**kwargs) def call_signatures(self): """ @@ -293,11 +376,13 @@ ) debug.speed('func_call followed') - return [classes.CallSignature(self._evaluator, d.name, + # TODO here we use stubs instead of the actual contexts. We should use + # the signatures from stubs, but the actual contexts, probably?! 
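# A usage sketch for the signature list assembled below; the source text
# and position are hypothetical:
#
#     sigs = jedi.Script('open(', 1, len('open(')).call_signatures()
#     sigs[0].name   # 'open'
#     sigs[0].index  # 0, i.e. the cursor sits on the first parameter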
+ return [classes.CallSignature(self._evaluator, signature, call_signature_details.bracket_leaf.start_pos, call_signature_details.call_index, call_signature_details.keyword_name_str) - for d in definitions if hasattr(d, 'py__call__')] + for signature in definitions.get_signatures()] def _analysis(self): self._evaluator.is_analysis = True diff --git a/jedi/api/classes.py b/jedi/api/classes.py index 4168f6e9..513bfbf7 100644 --- a/jedi/api/classes.py +++ b/jedi/api/classes.py @@ -13,8 +13,10 @@ from jedi.cache import memoize_method from jedi.evaluate import imports from jedi.evaluate import compiled from jedi.evaluate.imports import ImportName -from jedi.evaluate.context import instance -from jedi.evaluate.context import ClassContext, FunctionContext, FunctionExecutionContext +from jedi.evaluate.names import ParamName +from jedi.evaluate.context import FunctionExecutionContext, MethodContext +from jedi.evaluate.gradual.typeshed import StubModuleContext +from jedi.evaluate.gradual.conversion import name_to_stub, stub_to_actual_context_set from jedi.api.keywords import KeywordName @@ -58,17 +60,26 @@ class BaseDefinition(object): self._evaluator = evaluator self._name = name """ - An instance of :class:`parso.reprsentation.Name` subclass. + An instance of :class:`parso.python.tree.Name` subclass. """ self.is_keyword = isinstance(self._name, KeywordName) - # generate a path to the definition - self._module = name.get_root_context() - if self.in_builtin_module(): - self.module_path = None + @memoize_method + def _get_module(self): + # This can take a while to complete, because in the worst case of + # imports (consider `import a` completions), we need to load all + # modules starting with a first. + return self._name.get_root_context() + + @property + def module_path(self): + """Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``""" + try: + py__file__ = self._get_module().py__file__ + except AttributeError: + return None else: - self.module_path = self._module.py__file__() - """Shows the file path of a module. e.g. 
``/usr/lib/python2.7/os.py``""" + try: + py__file__ = self._get_module().py__file__ + except AttributeError: + return None else: - self.module_path = self._module.py__file__() - """Shows the file path of a module. e.g. + return py__file__() @property def name(self): @@ -199,11 +210,14 @@ class BaseDefinition(object): >>> print(d.module_name) # doctest: +ELLIPSIS json """ - return self._module.name.string_name + return self._get_module().name.string_name def in_builtin_module(self): """Whether this is a builtin module.""" - return isinstance(self._module, compiled.CompiledObject) + if isinstance(self._get_module(), StubModuleContext): + return any(isinstance(context, compiled.CompiledObject) + for context in self._get_module().non_stub_context_set) + return isinstance(self._get_module(), compiled.CompiledObject) @property def line(self): @@ -296,16 +310,44 @@ class BaseDefinition(object): return '.'.join(path if path[0] else path[1:]) + def is_stub(self): + return all(c.is_stub() for c in self._name.infer()) + + def goto_stubs(self): + if self.is_stub(): + return [self] + + return [ + Definition(self._evaluator, stub_def.name) + for stub_def in name_to_stub(self._name) + ] + def goto_assignments(self): - if self._name.tree_name is None: - return self + return [self if n == self._name else Definition(self._evaluator, n) + for n in self._name.goto()] - names = self._evaluator.goto(self._name.parent_context, self._name.tree_name) - return [Definition(self._evaluator, n) for n in names] + def infer(self): + tree_name = self._name.tree_name + parent_context = self._name.parent_context + # Param names are special because they are not handled by + # the evaluator method. + if tree_name is None or parent_context is None or isinstance(self._name, ParamName): + context_set = self._name.infer() + else: - def _goto_definitions(self): - # TODO make this function public. - return [Definition(self._evaluator, d.name) for d in self._name.infer()] + # TODO remove this paragraph, it's ugly and shouldn't be needed + inferred = self._name.infer() + if inferred: + inferred = next(iter(inferred)) + if isinstance(inferred, MethodContext): + c = inferred.class_context + else: + c = self._name.parent_context + else: + c = self._name.parent_context + + context_set = self._evaluator.goto_definitions(c, tree_name) + return [Definition(self._evaluator, d.name) for d in context_set] @property @memoize_method def params(self): """ Raises an ``AttributeError`` if the definition is not callable. Otherwise returns a list of `Definition` that represents the params. """ - def get_param_names(context): - param_names = [] - if context.api_type == 'function': - param_names = list(context.get_param_names()) - if isinstance(context, instance.BoundMethod): - param_names = param_names[1:] - elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)): - if isinstance(context, ClassContext): - search = u'__init__' - else: - search = u'__call__' - names = context.get_function_slot_names(search) - if not names: - return [] + # Only return the first one. There might be multiple ones, especially + # with overloading. + for context in self._name.infer(): + for signature in context.get_signatures(): + return [Definition(self._evaluator, n) for n in signature.get_param_names()] - # Just take the first one here, not optimal, but currently - # there's no better solution. 
- inferred = names[0].infer() - param_names = get_param_names(next(iter(inferred))) - if isinstance(context, ClassContext): - param_names = param_names[1:] - return param_names - elif isinstance(context, compiled.CompiledObject): - return list(context.get_param_names()) - return param_names - - followed = list(self._name.infer()) - if not followed or not hasattr(followed[0], 'py__call__'): - raise AttributeError('There are no params defined on this.') - context = followed[0] # only check the first one. - - return [Definition(self._evaluator, n) for n in get_param_names(context)] + raise AttributeError('There are no params defined on this.') def parent(self): context = self._name.parent_context @@ -353,10 +370,7 @@ class BaseDefinition(object): return None if isinstance(context, FunctionExecutionContext): - # TODO the function context should be a part of the function - # execution context. - context = FunctionContext( - self._evaluator, context.parent_context, context.tree_node) + context = context.function_context return Definition(self._evaluator, context.name) def __repr__(self): @@ -400,7 +414,7 @@ class Completion(BaseDefinition): def _complete(self, like_name): append = '' if settings.add_bracket_after_function \ - and self.type == 'Function': + and self.type == 'function': append = '(' if self._name.api_type == 'param' and self._stack is not None: @@ -536,9 +550,9 @@ class Definition(BaseDefinition): # here. txt = definition.get_code(include_prefix=False) # Delete comments: - txt = re.sub('#[^\n]+\n', ' ', txt) + txt = re.sub(r'#[^\n]+\n', ' ', txt) # Delete multi spaces/newlines - txt = re.sub('\s+', ' ', txt).strip() + txt = re.sub(r'\s+', ' ', txt).strip() return txt @property @@ -597,11 +611,12 @@ class CallSignature(Definition): It knows what functions you are currently in. e.g. `isinstance(` would return the `isinstance` function. without `(` it would return nothing. """ - def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str): - super(CallSignature, self).__init__(evaluator, executable_name) + def __init__(self, evaluator, signature, bracket_start_pos, index, key_name_str): + super(CallSignature, self).__init__(evaluator, signature.name) self._index = index self._key_name_str = key_name_str self._bracket_start_pos = bracket_start_pos + self._signature = signature @property def index(self): @@ -630,6 +645,10 @@ class CallSignature(Definition): return None return self._index + @property + def params(self): + return [Definition(self._evaluator, n) for n in self._signature.get_param_names()] + @property def bracket_start(self): """ @@ -638,9 +657,25 @@ class CallSignature(Definition): """ return self._bracket_start_pos + @property + def _params_str(self): + return ', '.join([p.description[6:] + for p in self.params]) + def __repr__(self): - return '<%s: %s index %s>' % \ - (type(self).__name__, self._name.string_name, self.index) + return '<%s: %s index=%r params=[%s]>' % ( + type(self).__name__, + self._name.string_name, + self._index, + self._params_str, + ) + + +def _format_signatures(context): + return '\n'.join( + signature.to_string() + for signature in context.get_signatures() + ) class _Help(object): @@ -667,9 +702,29 @@ class _Help(object): See :attr:`doc` for example. """ - # TODO: Use all of the followed objects as output. Possibly divinding - # them by a few dashes. + full_doc = '' + # Using the first docstring that we see. 
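# For instance, Script('open(', ...).call_signatures()[0].docstring()
# (hypothetical input) combines the signature text produced by
# _format_signatures() with the docstring found here, which is roughly
# what the full_doc concatenation below assembles.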
for context in self._get_contexts(fast=fast): - return context.py__doc__(include_call_signature=not raw) + if full_doc: + # In case we have multiple contexts, just return all of them + # separated by a few dashes. + full_doc += '\n' + '-' * 30 + '\n' - return '' + doc = context.py__doc__() + + if raw: + signature_text = '' + else: + signature_text = _format_signatures(context) + if not doc and context.is_stub(): + for c in stub_to_actual_context_set(context): + doc = c.py__doc__() + if doc: + break + + if signature_text and doc: + full_doc += signature_text + '\n\n' + doc + else: + full_doc += signature_text + doc + + return full_doc diff --git a/jedi/api/completion.py b/jedi/api/completion.py index 358d726b..481af874 100644 --- a/jedi/api/completion.py +++ b/jedi/api/completion.py @@ -9,8 +9,9 @@ from jedi.api import classes from jedi.api import helpers from jedi.evaluate import imports from jedi.api import keywords -from jedi.evaluate.helpers import evaluate_call_of_leaf +from jedi.evaluate.helpers import evaluate_call_of_leaf, parse_dotted_names from jedi.evaluate.filters import get_global_filters +from jedi.evaluate.gradual.conversion import stub_to_actual_context_set from jedi.parser_utils import get_statement_of_position @@ -185,7 +186,7 @@ class Completion: # Also true for defining names as a class or function. return list(self._get_class_context_completions(is_function=True)) elif "import_stmt" in nonterminals: - level, names = self._parse_dotted_names(nodes, "import_from" in nonterminals) + level, names = parse_dotted_names(nodes, "import_from" in nonterminals) only_modules = not ("import_from" in nonterminals and 'import' in nodes) completion_names += self._get_importer_names( @@ -233,32 +234,24 @@ class Completion: ) contexts = evaluate_call_of_leaf(evaluation_context, previous_leaf) completion_names = [] - debug.dbg('trailer completion contexts: %s', contexts) + debug.dbg('trailer completion contexts: %s', contexts, color='MAGENTA') for context in contexts: for filter in context.get_filters( - search_global=False, origin_scope=user_context.tree_node): + search_global=False, + origin_scope=user_context.tree_node): completion_names += filter.values() - return completion_names - def _parse_dotted_names(self, nodes, is_import_from): - level = 0 - names = [] - for node in nodes[1:]: - if node in ('.', '...'): - if not names: - level += len(node.value) - elif node.type == 'dotted_name': - names += node.children[::2] - elif node.type == 'name': - names.append(node) - elif node == ',': - if not is_import_from: - names = [] - else: - # Here if the keyword `import` comes along it stops checking - # for names. 
- break - return level, names + for context in contexts: + if not context.is_stub(): + continue + + actual_contexts = stub_to_actual_context_set(context, ignore_compiled=True) + for c in actual_contexts: + for filter in c.get_filters( + search_global=False, + origin_scope=user_context.tree_node): + completion_names += filter.values() + return completion_names def _get_importer_names(self, names, level=0, only_modules=True): names = [n.value for n in names] @@ -288,5 +281,6 @@ class Completion: next(filters) for filter in filters: for name in filter.values(): + # TODO we should probably check here for properties if (name.api_type == 'function') == is_function: yield name diff --git a/jedi/api/environment.py b/jedi/api/environment.py index 57397f37..c4125d8b 100644 --- a/jedi/api/environment.py +++ b/jedi/api/environment.py @@ -3,23 +3,21 @@ Environments are a way to activate different Python versions or Virtualenvs for static analysis. The Python binary in that environment is going to be executed. """ import os -import re import sys import hashlib import filecmp -from subprocess import PIPE from collections import namedtuple -from jedi._compatibility import GeneralizedPopen, which +from jedi._compatibility import highest_pickle_protocol, which from jedi.cache import memoize_method, time_cache -from jedi.evaluate.compiled.subprocess import get_subprocess, \ +from jedi.evaluate.compiled.subprocess import CompiledSubprocess, \ EvaluatorSameProcess, EvaluatorSubprocess import parso _VersionInfo = namedtuple('VersionInfo', 'major minor micro') -_SUPPORTED_PYTHONS = ['3.6', '3.5', '3.4', '3.3', '2.7'] +_SUPPORTED_PYTHONS = ['3.7', '3.6', '3.5', '3.4', '3.3', '2.7'] _SAFE_PATHS = ['/usr/bin', '/usr/local/bin'] _CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor) @@ -46,49 +44,66 @@ class _BaseEnvironment(object): return self._hash +def _get_info(): + return ( + sys.executable, + sys.prefix, + sys.version_info[:3], + ) + + class Environment(_BaseEnvironment): """ This class is supposed to be created by internal Jedi architecture. You should not create it directly. Please use create_environment or the other functions instead. It is then returned by that function. """ - def __init__(self, path, executable): - self.path = os.path.abspath(path) - """ - The path to an environment, matches ``sys.prefix``. - """ - self.executable = os.path.abspath(executable) + _subprocess = None + + def __init__(self, executable): + self._start_executable = executable + # Initialize the environment + self._get_subprocess() + + def _get_subprocess(self): + if self._subprocess is not None and not self._subprocess.is_crashed: + return self._subprocess + + try: + self._subprocess = CompiledSubprocess(self._start_executable) + info = self._subprocess._send(None, _get_info) + except Exception as exc: + raise InvalidPythonEnvironment( + "Could not get version information for %r: %r" % ( + self._start_executable, + exc)) + + # Since it could change and might not be the same(?) as the one given, + # set it here. + self.executable = info[0] """ The Python executable, matches ``sys.executable``. """ - self.version_info = self._get_version() + self.path = info[1] + """ + The path to an environment, matches ``sys.prefix``. + """ + self.version_info = _VersionInfo(*info[2]) """ - Like ``sys.version_info``. A tuple to show the current Environment's Python version. 
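For example, on a CPython 3.7.0 host this ends up as ``VersionInfo(major=3, minor=7, micro=0)``. The handshake that produces it boils down to (a condensed sketch of the flow above, not new API)::

    executable, prefix, version = self._subprocess._send(None, _get_info)
    self.version_info = _VersionInfo(*version)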
""" - def _get_version(self): - try: - process = GeneralizedPopen([self.executable, '--version'], stdout=PIPE, stderr=PIPE) - stdout, stderr = process.communicate() - retcode = process.poll() - if retcode: - raise InvalidPythonEnvironment( - "Exited with %d (stdout=%r, stderr=%r)" % ( - retcode, stdout, stderr)) - except OSError as exc: - raise InvalidPythonEnvironment( - "Could not get version information: %r" % exc) + # py2 sends bytes via pickle apparently?! + if self.version_info.major == 2: + self.executable = self.executable.decode() + self.path = self.path.decode() - # Until Python 3.4 wthe version string is part of stderr, after that - # stdout. - output = stdout + stderr - match = re.match(br'Python (\d+)\.(\d+)\.(\d+)', output) - if match is None: - raise InvalidPythonEnvironment("--version not working") + # Adjust pickle protocol according to host and client version. + self._subprocess._pickle_protocol = highest_pickle_protocol([ + sys.version_info, self.version_info]) - return _VersionInfo(*[int(m) for m in match.groups()]) + return self._subprocess def __repr__(self): version = '.'.join(str(i) for i in self.version_info) @@ -97,9 +112,6 @@ class Environment(_BaseEnvironment): def get_evaluator_subprocess(self, evaluator): return EvaluatorSubprocess(evaluator, self._get_subprocess()) - def _get_subprocess(self): - return get_subprocess(self.executable, self.version_info) - @memoize_method def get_sys_path(self): """ @@ -116,18 +128,18 @@ class Environment(_BaseEnvironment): return self._get_subprocess().get_sys_path() -class SameEnvironment(Environment): - def __init__(self): - super(SameEnvironment, self).__init__(sys.prefix, sys.executable) - - def _get_version(self): - return _VersionInfo(*sys.version_info[:3]) - - -class InterpreterEnvironment(_BaseEnvironment): +class _SameEnvironmentMixin(object): def __init__(self): + self._start_executable = self.executable = sys.executable + self.path = sys.prefix self.version_info = _VersionInfo(*sys.version_info[:3]) + +class SameEnvironment(_SameEnvironmentMixin, Environment): + pass + + +class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment): def get_evaluator_subprocess(self, evaluator): return EvaluatorSameProcess(evaluator) @@ -136,13 +148,22 @@ class InterpreterEnvironment(_BaseEnvironment): def _get_virtual_env_from_var(): + """Get virtualenv environment from VIRTUAL_ENV environment variable. + + It uses `safe=False` with ``create_environment``, because the environment + variable is considered to be safe / controlled by the user solely. + """ var = os.environ.get('VIRTUAL_ENV') - if var is not None: - if var == sys.prefix: - return SameEnvironment() + if var: + # Under macOS in some cases - notably when using Pipenv - the + # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of + # /path/to/env so we need to fully resolve the paths in order to + # compare them. 
+ if os.path.realpath(var) == os.path.realpath(sys.prefix): + return _try_get_same_env() try: - return create_environment(var) + return create_environment(var, safe=False) except InvalidPythonEnvironment: pass @@ -168,16 +189,65 @@ def get_default_environment(): if virtual_env is not None: return virtual_env - for environment in find_system_environments(): - return environment + return _try_get_same_env() - # If no Python Environment is found, use the environment we're already + +def _try_get_same_env(): + env = SameEnvironment() + if not os.path.basename(env.executable).lower().startswith('python'): + # This tries to counter issues with embedding. In some cases (e.g. + # VIM's embedded Python on Mac/Windows), sys.executable is /foo/bar/vim. This + # happens, because for Mac a function called `_NSGetExecutablePath` is + # used and for Windows `GetModuleFileNameW`. These are both platform + # specific functions. For all other systems sys.executable should be + # alright. However here we try to generalize: + # + # 1. Check if the executable looks like python (heuristic) + # 2. In case it's not, try to find the executable + # 3. In case we don't find it, use an interpreter environment. + # + # The last option will always work, but leads to potential crashes of + # Jedi - which is ok, because it happens very rarely, and even less often + # because the code below should work for most cases. + if os.name == 'nt': + # The first case would be a virtualenv and the second a normal + # Python installation. + checks = (r'Scripts\python.exe', 'python.exe') + else: + # For unix it looks like Python is always in a bin folder. + checks = ( + 'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]), + 'bin/python%s' % (sys.version_info[0]), + 'bin/python', + ) + for check in checks: + guess = os.path.join(sys.exec_prefix, check) + if os.path.isfile(guess): + # Bingo - We think we have our Python. + return Environment(guess) + # It looks like there is no reasonable Python to be found. + return InterpreterEnvironment() + # If no virtualenv is found, use the environment we're already # using. - return SameEnvironment() + return env + + +def get_cached_default_environment(): + var = os.environ.get('VIRTUAL_ENV') + environment = _get_cached_default_environment() + + # Under macOS in some cases - notably when using Pipenv - the + # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of + # /path/to/env so we need to fully resolve the paths in order to + # compare them. + if var and os.path.realpath(var) != os.path.realpath(environment.path): + _get_cached_default_environment.clear_cache() + return _get_cached_default_environment() + return environment @time_cache(seconds=10 * 60) # 10 Minutes -def get_cached_default_environment(): +def _get_cached_default_environment(): return get_default_environment() @@ -222,7 +292,7 @@ def find_virtualenvs(paths=None, **kwargs): try: executable = _get_executable_path(path, safe=safe) - yield Environment(path, executable) + yield Environment(executable) except InvalidPythonEnvironment: pass @@ -246,23 +316,6 @@ def find_system_environments(): pass -# TODO: the logic to find the Python prefix is much more complicated than that. -# See Modules/getpath.c for UNIX and PC/getpathp.c for Windows in CPython's -# source code. A solution would be to deduce it by running the Python -# interpreter and printing the value of sys.prefix. 
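The removed TODO below described exactly the approach the new ``Environment`` takes: ask the interpreter itself instead of guessing the prefix. A standalone sketch of that idea (``executable`` is any Python binary; the real implementation goes through ``CompiledSubprocess`` and pickle rather than JSON):

    import json
    import subprocess

    def probe(executable):
        # Prints the same triple that _get_info() returns in the child.
        out = subprocess.check_output([
            executable, '-c',
            'import sys, json; print(json.dumps('
            '[sys.executable, sys.prefix, list(sys.version_info[:3])]))',
        ])
        return json.loads(out.decode('utf-8'))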
-def _get_python_prefix(executable): - if os.name != 'nt': - return os.path.dirname(os.path.dirname(executable)) - landmark = os.path.join('Lib', 'os.py') - prefix = os.path.dirname(executable) - while prefix: - if os.path.join(prefix, landmark): - return prefix - prefix = os.path.dirname(prefix) - raise InvalidPythonEnvironment( - "Cannot find prefix of executable %s." % executable) - - # TODO: this function should probably return a list of environments since # multiple Python installations can be found on a system for the same version. def get_system_environment(version): @@ -277,17 +330,20 @@ def get_system_environment(version): if exe: if exe == sys.executable: return SameEnvironment() - return Environment(_get_python_prefix(exe), exe) + return Environment(exe) if os.name == 'nt': - for prefix, exe in _get_executables_from_windows_registry(version): - return Environment(prefix, exe) + for exe in _get_executables_from_windows_registry(version): + try: + return Environment(exe) + except InvalidPythonEnvironment: + pass raise InvalidPythonEnvironment("Cannot find executable python%s." % version) def create_environment(path, safe=True): """ - Make it possible to manually create an environment by specifying a + Make it possible to manually create an Environment object by specifying a Virtualenv path or an executable path. :raises: :exc:`.InvalidPythonEnvironment` @@ -295,8 +351,8 @@ def create_environment(path, safe=True): """ if os.path.isfile(path): _assert_safe(path, safe) - return Environment(_get_python_prefix(path), path) - return Environment(path, _get_executable_path(path, safe=safe)) + return Environment(path) + return Environment(_get_executable_path(path, safe=safe)) def _get_executable_path(path, safe=True): @@ -318,16 +374,16 @@ def _get_executable_path(path, safe=True): def _get_executables_from_windows_registry(version): # The winreg module is named _winreg on Python 2. try: - import winreg + import winreg except ImportError: - import _winreg as winreg + import _winreg as winreg # TODO: support Python Anaconda. 
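# The sub_keys below follow the PEP 514 registry layout; for example, a
# 64-bit CPython 3.6 installed for all users typically registers its
# install path under
# HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\3.6\InstallPath.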
sub_keys = [ - r'SOFTWARE\Python\PythonCore\{version}\InstallPath', - r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath', - r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath', - r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath' + r'SOFTWARE\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath' ] for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]: for sub_key in sub_keys: @@ -337,7 +393,7 @@ def _get_executables_from_windows_registry(version): prefix = winreg.QueryValueEx(key, '')[0] exe = os.path.join(prefix, 'python.exe') if os.path.isfile(exe): - yield prefix, exe + yield exe except WindowsError: pass diff --git a/jedi/api/helpers.py b/jedi/api/helpers.py index cb764cc6..f691707d 100644 --- a/jedi/api/helpers.py +++ b/jedi/api/helpers.py @@ -9,6 +9,7 @@ from parso.python.parser import Parser from parso.python import tree from jedi._compatibility import u +from jedi.evaluate.base_context import NO_CONTEXTS from jedi.evaluate.syntax_tree import eval_atom from jedi.evaluate.helpers import evaluate_call_of_leaf from jedi.evaluate.compiled import get_string_context_set @@ -20,7 +21,7 @@ CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) def sorted_definitions(defs): # Note: `or ''` below is required because `module_path` could be - return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) + return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0, x.name)) def get_on_completion_name(module_node, lines, position): @@ -105,14 +106,17 @@ def get_stack_at_position(grammar, code_lines, module_node, pos): # TODO This is for now not an official parso API that exists purely # for Jedi. tokens = grammar._tokenize(code) - for token_ in tokens: - if token_.string == safeword: + for token in tokens: + if token.string == safeword: raise EndMarkerReached() - elif token_.prefix.endswith(safeword): + elif token.prefix.endswith(safeword): # This happens with comments. raise EndMarkerReached() + elif token.string.endswith(safeword): + yield token # Probably an f-string literal that was not finished. + raise EndMarkerReached() else: - yield token_ + yield token # The code might be indedented, just remove it. code = dedent(_get_code_for_stack(code_lines, module_node, pos)) @@ -127,25 +131,38 @@ def get_stack_at_position(grammar, code_lines, module_node, pos): p.parse(tokens=tokenize_without_endmarker(code)) except EndMarkerReached: return p.stack - raise SystemError("This really shouldn't happen. There's a bug in Jedi.") + raise SystemError( + "This really shouldn't happen. There's a bug in Jedi:\n%s" + % list(tokenize_without_endmarker(code)) + ) -def evaluate_goto_definition(evaluator, context, leaf): +def evaluate_goto_definition(evaluator, context, leaf, prefer_stubs=False): if leaf.type == 'name': # In case of a name we can just use goto_definition which does all the # magic itself. 
- return evaluator.goto_definitions(context, leaf) + if prefer_stubs: + return evaluator.goto_stub_definitions(context, leaf) + else: + return evaluator.goto_definitions(context, leaf) parent = leaf.parent + definitions = NO_CONTEXTS if parent.type == 'atom': - return context.eval_node(leaf.parent) + definitions = context.eval_node(leaf.parent) elif parent.type == 'trailer': - return evaluate_call_of_leaf(context, leaf) + definitions = evaluate_call_of_leaf(context, leaf) elif isinstance(leaf, tree.Literal): return eval_atom(context, leaf) elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'): return get_string_context_set(evaluator) - return [] + if prefer_stubs: + return definitions + from jedi.evaluate.gradual.conversion import try_stubs_to_actual_context_set + return try_stubs_to_actual_context_set( + definitions, + prefer_stub_to_compiled=True, + ) CallSignatureDetails = namedtuple( @@ -166,7 +183,8 @@ def _get_index_and_key(nodes, position): if nodes_before: last = nodes_before[-1] - if last.type == 'argument' and last.children[1].end_pos <= position: + if last.type == 'argument' and last.children[1] == '=' \ + and last.children[1].end_pos <= position: # Checked if the argument key_str = last.children[0].value elif last == '=': @@ -249,5 +267,6 @@ def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos yield evaluate_goto_definition( evaluator, context, - bracket_leaf.get_previous_leaf() + bracket_leaf.get_previous_leaf(), + prefer_stubs=True, ) diff --git a/jedi/api/interpreter.py b/jedi/api/interpreter.py index c9b7bd69..56bb3586 100644 --- a/jedi/api/interpreter.py +++ b/jedi/api/interpreter.py @@ -21,6 +21,7 @@ class NamespaceObject(object): class MixedModuleContext(Context): + # TODO use ContextWrapper! type = 'mixed_module' def __init__(self, evaluator, tree_module, namespaces, path, code_lines): @@ -31,6 +32,7 @@ class MixedModuleContext(Context): self._module_context = ModuleContext( evaluator, tree_module, path=path, + string_names=('__main__',), code_lines=code_lines ) self.tree_node = tree_module diff --git a/jedi/api/keywords.py b/jedi/api/keywords.py index 2991a0f8..1dab29ae 100644 --- a/jedi/api/keywords.py +++ b/jedi/api/keywords.py @@ -1,7 +1,7 @@ import pydoc from jedi.evaluate.utils import ignored -from jedi.evaluate.filters import AbstractNameDefinition +from jedi.evaluate.names import AbstractNameDefinition try: from pydoc_data import topics as pydoc_topics @@ -44,9 +44,15 @@ class Keyword(object): """ For a `parsing.Name` like comparision """ return [self.name] - def py__doc__(self, include_call_signature=False): + def py__doc__(self): return imitate_pydoc(self.name.string_name) + def get_signatures(self): + # TODO this makes no sense, I think Keyword should somehow merge with + # Context to make it easier for the api/classes.py to deal with all + # of it. 
+ return [] + def __repr__(self): return '<%s: %s>' % (type(self).__name__, self.name) diff --git a/jedi/api/project.py b/jedi/api/project.py index ca6992b5..f8f32917 100644 --- a/jedi/api/project.py +++ b/jedi/api/project.py @@ -1,7 +1,7 @@ import os import json -from jedi._compatibility import FileNotFoundError, NotADirectoryError +from jedi._compatibility import FileNotFoundError, NotADirectoryError, PermissionError from jedi.api.environment import SameEnvironment, \ get_cached_default_environment from jedi.api.exceptions import WrongVersion @@ -67,7 +67,7 @@ class Project(object): """ def py2_comp(path, environment=None, sys_path=None, smart_sys_path=True, _django=False): - self._path = path + self._path = os.path.abspath(path) if isinstance(environment, SameEnvironment): self._environment = environment @@ -77,7 +77,8 @@ class Project(object): py2_comp(path, **kwargs) - def _get_base_sys_path(self, environment=None): + @evaluator_as_method_param_cache() + def _get_base_sys_path(self, evaluator, environment=None): if self._sys_path is not None: return self._sys_path @@ -85,7 +86,7 @@ class Project(object): if environment is None: environment = self.get_environment() - sys_path = environment.get_sys_path() + sys_path = list(environment.get_sys_path()) try: sys_path.remove('') except ValueError: @@ -93,7 +94,7 @@ class Project(object): return sys_path @evaluator_as_method_param_cache() - def _get_sys_path(self, evaluator, environment=None): + def _get_sys_path(self, evaluator, environment=None, add_parent_paths=True): """ Keep this method private for all users of jedi. However internally this one is used like a public method. @@ -101,24 +102,20 @@ class Project(object): suffixed = [] prefixed = [] - sys_path = list(self._get_base_sys_path(environment)) + sys_path = list(self._get_base_sys_path(evaluator, environment)) if self._smart_sys_path: prefixed.append(self._path) if evaluator.script_path is not None: suffixed += discover_buildout_paths(evaluator, evaluator.script_path) - traversed = [] - for parent in traverse_parents(evaluator.script_path): - traversed.append(parent) - if parent == self._path: - # Don't go futher than the project path. - break + if add_parent_paths: + traversed = list(traverse_parents(evaluator.script_path)) - # AFAIK some libraries have imports like `foo.foo.bar`, which - # leads to the conclusion to by default prefer longer paths - # rather than shorter ones by default. - suffixed += reversed(traversed) + # AFAIK some libraries have imports like `foo.foo.bar`, which + # leads to the conclusion to by default prefer longer paths + # rather than shorter ones by default. 
+ suffixed += reversed(traversed) if self._django: prefixed.append(self._path) @@ -156,7 +153,7 @@ def _is_django_path(directory): try: with open(os.path.join(directory, 'manage.py'), 'rb') as f: return b"DJANGO_SETTINGS_MODULE" in f.read() - except (FileNotFoundError, NotADirectoryError): + except (FileNotFoundError, NotADirectoryError, PermissionError): return False return False @@ -172,7 +169,7 @@ def get_default_project(path=None): for dir in traverse_parents(check, include_current=True): try: return Project.load(dir) - except (FileNotFoundError, NotADirectoryError): + except (FileNotFoundError, NotADirectoryError, PermissionError): pass if first_no_init_file is None: diff --git a/jedi/api/replstartup.py b/jedi/api/replstartup.py index 4c44a626..3ac84708 100644 --- a/jedi/api/replstartup.py +++ b/jedi/api/replstartup.py @@ -1,6 +1,8 @@ """ To use Jedi completion in Python interpreter, add the following in your shell -setup (e.g., ``.bashrc``):: +setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is +not available on Windows. If you still want Jedi autocompletion in your REPL, +just use IPython instead:: export PYTHONSTARTUP="$(python -m jedi repl)" diff --git a/jedi/cache.py b/jedi/cache.py index 6c0c2a83..93e2bd7f 100644 --- a/jedi/cache.py +++ b/jedi/cache.py @@ -126,6 +126,7 @@ def time_cache(seconds): wrapper.clear_cache = lambda: cache.clear() return wrapper + return decorator diff --git a/jedi/common/context.py b/jedi/common/context.py index dfcf5a9e..b690ad33 100644 --- a/jedi/common/context.py +++ b/jedi/common/context.py @@ -12,17 +12,15 @@ class BaseContext(object): class BaseContextSet(object): - def __init__(self, *args): - self._set = set(args) + def __init__(self, iterable): + self._set = frozenset(iterable) + for context in iterable: + assert not isinstance(context, BaseContextSet) @classmethod - def from_iterable(cls, iterable): - return cls.from_set(set(iterable)) - - @classmethod - def from_set(cls, set_): - self = cls() - self._set = set_ + def _from_frozen_set(cls, frozenset_): + self = cls.__new__(cls) + self._set = frozenset_ return self @classmethod @@ -31,16 +29,18 @@ class BaseContextSet(object): Used to work with an iterable of set. 
""" aggregated = set() - sets = list(sets) for set_ in sets: if isinstance(set_, BaseContextSet): aggregated |= set_._set else: - aggregated |= set_ - return cls.from_set(aggregated) + aggregated |= frozenset(set_) + return cls._from_frozen_set(frozenset(aggregated)) def __or__(self, other): - return type(self).from_set(self._set | other._set) + return self._from_frozen_set(self._set | other._set) + + def __and__(self, other): + return self._from_frozen_set(self._set & other._set) def __iter__(self): for element in self._set: @@ -56,12 +56,18 @@ class BaseContextSet(object): return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set)) def filter(self, filter_func): - return type(self).from_iterable(filter(filter_func, self._set)) + return self.__class__(filter(filter_func, self._set)) def __getattr__(self, name): def mapper(*args, **kwargs): - return type(self).from_sets( + return self.from_sets( getattr(context, name)(*args, **kwargs) for context in self._set ) return mapper + + def __eq__(self, other): + return self._set == other._set + + def __hash__(self): + return hash(self._set) diff --git a/jedi/debug.py b/jedi/debug.py index a4fd8646..02719290 100644 --- a/jedi/debug.py +++ b/jedi/debug.py @@ -2,16 +2,17 @@ from jedi._compatibility import encoding, is_py3, u import os import time +_inited = False + + def _lazy_colorama_init(): """ - Lazily init colorama if necessary, not to screw up stdout is debug not - enabled. + Lazily init colorama if necessary, not to screw up stdout if debugging is + not enabled. This version of the function does nothing. """ - pass -_inited=False try: if os.name == 'nt': @@ -21,7 +22,8 @@ try: # Use colorama for nicer console output. from colorama import Fore, init from colorama import initialise - def _lazy_colorama_init(): + + def _lazy_colorama_init(): # noqa: F811 """ Lazily init colorama if necessary, not to screw up stdout is debug not enabled. @@ -49,6 +51,7 @@ except ImportError: YELLOW = '' MAGENTA = '' RESET = '' + BLUE = '' NOTICE = object() WARNING = object() diff --git a/jedi/evaluate/__init__.py b/jedi/evaluate/__init__.py index 3ba52b89..b55bf283 100644 --- a/jedi/evaluate/__init__.py +++ b/jedi/evaluate/__init__.py @@ -62,10 +62,12 @@ I need to mention now that lazy evaluation is really good because it only *evaluates* what needs to be *evaluated*. All the statements and modules that are not used are just being ignored. 
""" +from functools import partial from parso.python import tree import parso from parso import python_bytes_to_unicode +from parso.file_io import FileIO from jedi import debug from jedi import parser_utils @@ -73,9 +75,8 @@ from jedi.evaluate.utils import unite from jedi.evaluate import imports from jedi.evaluate import recursion from jedi.evaluate.cache import evaluator_function_cache -from jedi.evaluate import compiled from jedi.evaluate import helpers -from jedi.evaluate.filters import TreeNameDefinition, ParamName +from jedi.evaluate.names import TreeNameDefinition, ParamName from jedi.evaluate.base_context import ContextualizedName, ContextualizedNode, \ ContextSet, NO_CONTEXTS, iterate_contexts from jedi.evaluate.context import ClassContext, FunctionContext, \ @@ -83,6 +84,20 @@ from jedi.evaluate.context import ClassContext, FunctionContext, \ from jedi.evaluate.context.iterable import CompForContext from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \ eval_node, check_tuple_assignments +from jedi.evaluate.gradual.conversion import try_stub_to_actual_names, \ + try_stubs_to_actual_context_set + + +def _execute(context, arguments): + try: + func = context.py__call__ + except AttributeError: + debug.warning("no execution possible %s", context) + return NO_CONTEXTS + else: + context_set = func(arguments=arguments) + debug.dbg('execute result: %s in %s', context_set, context) + return context_set class Evaluator(object): @@ -94,9 +109,10 @@ class Evaluator(object): self.compiled_subprocess = environment.get_evaluator_subprocess(self) self.grammar = environment.get_grammar() - self.latest_grammar = parso.load_grammar(version='3.6') + self.latest_grammar = parso.load_grammar(version='3.7') self.memoize_cache = {} # for memoize decorators self.module_cache = imports.ModuleCache() # does the job of `sys.modules`. + self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleContext]] self.compiled_cache = {} # see `evaluate.compiled.create()` self.inferred_element_counts = {} self.mixed_cache = {} # see `evaluate.compiled.mixed._create()` @@ -105,24 +121,59 @@ class Evaluator(object): self.is_analysis = False self.project = project self.access_cache = {} + # This setting is only temporary to limit the work we have to do with + # tensorflow and others. 
+ self.infer_enabled = True self.reset_recursion_limitations() self.allow_different_encoding = True + # Plugin API + from jedi.plugins import plugin_manager + plugin_callbacks = plugin_manager.get_callbacks(self) + self.execute = plugin_callbacks.decorate('execute', callback=_execute) + self._import_module = partial( + plugin_callbacks.decorate( + 'import_module', + callback=imports.import_module + ), + self, + ) + + def import_module(self, import_names, parent_module_context=None, + sys_path=None, prefer_stubs=True): + if sys_path is None: + sys_path = self.get_sys_path() + return self._import_module(import_names, parent_module_context, + sys_path, prefer_stubs=prefer_stubs) + @property @evaluator_function_cache() def builtins_module(self): - return compiled.get_special_object(self, u'BUILTINS') + module_name = u'builtins' + if self.environment.version_info.major == 2: + module_name = u'__builtin__' + builtins_module, = self.import_module((module_name,), sys_path=()) + return builtins_module + + @property + @evaluator_function_cache() + def typing_module(self): + typing_module, = self.import_module((u'typing',)) + return typing_module def reset_recursion_limitations(self): self.recursion_detector = recursion.RecursionDetector() self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) - def get_sys_path(self): + def get_sys_path(self, **kwargs): """Convenience function""" - return self.project._get_sys_path(self, environment=self.environment) + return self.project._get_sys_path(self, environment=self.environment, **kwargs) def eval_element(self, context, element): + if not self.infer_enabled: + return NO_CONTEXTS + if isinstance(context, CompForContext): return eval_node(context, element) @@ -173,14 +224,14 @@ class Evaluator(object): new_name_dicts = list(original_name_dicts) for i, name_dict in enumerate(new_name_dicts): new_name_dicts[i] = name_dict.copy() - new_name_dicts[i][if_name.value] = ContextSet(definition) + new_name_dicts[i][if_name.value] = ContextSet([definition]) name_dicts += new_name_dicts else: for name_dict in name_dicts: name_dict[if_name.value] = definitions if len(name_dicts) > 1: - result = ContextSet() + result = NO_CONTEXTS for name_dict in name_dicts: with helpers.predefine_names(context, if_stmt, name_dict): result |= eval_node(context, element) @@ -210,13 +261,23 @@ class Evaluator(object): return eval_node(context, element) def goto_definitions(self, context, name): + # We don't want stubs here we want the actual contexts, if possible. 
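With typeshed in the tree, a name can now resolve to a ``.pyi`` stub, to the actual Python source, or to both; ``goto_definitions`` therefore converts stub contexts back to the real definitions where possible and falls back to the stub otherwise, which is what the call below does. The effect is observable through the public API; assuming this checkout::

    import jedi

    defs = jedi.Script('import os.path; os.path.join').goto_definitions()
    # Expect the runtime module (e.g. ".../posixpath.py") rather than the
    # typeshed stub (a ".pyi" file), because the Python source is available.
    print(defs[0].module_path)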
+ return try_stubs_to_actual_context_set( + self.goto_stub_definitions(context, name), + prefer_stub_to_compiled=True + ) + + def goto_stub_definitions(self, context, name): def_ = name.get_definition(import_name_always=True) if def_ is not None: type_ = def_.type - if type_ == 'classdef': - return [ClassContext(self, context, name.parent)] - elif type_ == 'funcdef': - return [FunctionContext(self, context, name.parent)] + is_classdef = type_ == 'classdef' + if is_classdef or type_ == 'funcdef': + if is_classdef: + c = ClassContext(self, context, name.parent) + else: + c = FunctionContext.from_context(context, name.parent) + return ContextSet([c]) if type_ == 'expr_stmt': is_simple_name = name.parent.type not in ('power', 'trailer') @@ -230,10 +291,46 @@ class Evaluator(object): return check_tuple_assignments(self, c_node, for_types) if type_ in ('import_from', 'import_name'): return imports.infer_import(context, name) + else: + result = self._follow_error_node_imports_if_possible(context, name) + if result is not None: + return result return helpers.evaluate_call_of_leaf(context, name) + def _follow_error_node_imports_if_possible(self, context, name): + error_node = tree.search_ancestor(name, 'error_node') + if error_node is not None: + # Get the first command start of a started simple_stmt. The error + # node is sometimes a small_stmt and sometimes a simple_stmt. Check + # for ; leaves that start a new statements. + start_index = 0 + for index, n in enumerate(error_node.children): + if n.start_pos > name.start_pos: + break + if n == ';': + start_index = index + 1 + nodes = error_node.children[start_index:] + first_name = nodes[0].get_first_leaf().value + + # Make it possible to infer stuff like `import foo.` or + # `from foo.bar`. + if first_name in ('from', 'import'): + is_import_from = first_name == 'from' + level, names = helpers.parse_dotted_names( + nodes, + is_import_from=is_import_from, + until_node=name, + ) + return imports.Importer(self, names, context.get_root_context(), level).follow() + return None + def goto(self, context, name): + names = self._goto(context, name) + names = try_stub_to_actual_names(names, prefer_stub_to_compiled=True) + return names + + def _goto(self, context, name): definition = name.get_definition(import_name_always=True) if definition is not None: type_ = definition.type @@ -250,6 +347,10 @@ class Evaluator(object): elif type_ in ('import_from', 'import_name'): module_names = imports.infer_import(context, name, is_goto=True) return module_names + else: + contexts = self._follow_error_node_imports_if_possible(context, name) + if contexts is not None: + return [context.name for context in contexts] par = name.parent node_type = par.type @@ -271,12 +372,8 @@ class Evaluator(object): context_set = eval_trailer(context, context_set, trailer) param_names = [] for context in context_set: - try: - get_param_names = context.get_param_names - except AttributeError: - pass - else: - for param_name in get_param_names(): + for signature in context.get_signatures(): + for param_name in signature.get_param_names(): if param_name.string_name == name.value: param_names.append(param_name) return param_names @@ -325,36 +422,32 @@ class Evaluator(object): if n.type == 'comp_for': return n - def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_object=False): + def from_scope_node(scope_node, is_nested=True, node_is_object=False): if scope_node == base_node: return base_context is_funcdef = scope_node.type in ('funcdef', 'lambdef') parent_scope = 
parser_utils.get_parent_scope(scope_node) - parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef) + parent_context = from_scope_node(parent_scope) if is_funcdef: - if isinstance(parent_context, AnonymousInstance): + parent_was_class = parent_context.is_class() + if parent_was_class: + parent_context = AnonymousInstance( + self, parent_context.parent_context, parent_context) + + func = FunctionContext.from_context(parent_context, scope_node) + + if parent_was_class: func = BoundMethod( - self, parent_context, parent_context.class_context, - parent_context.parent_context, scope_node - ) - else: - func = FunctionContext( - self, - parent_context, - scope_node + instance=parent_context, + function=func ) if is_nested and not node_is_object: return func.get_function_execution() return func elif scope_node.type == 'classdef': - class_context = ClassContext(self, parent_context, scope_node) - if child_is_funcdef: - # anonymous instance - return AnonymousInstance(self, parent_context, class_context) - else: - return class_context + return ClassContext(self, parent_context, scope_node) elif scope_node.type == 'comp_for': if node.start_pos >= scope_node.children[-1].start_pos: return parent_context @@ -373,14 +466,17 @@ class Evaluator(object): scope_node = parent_scope(node) return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object) - def parse_and_get_code(self, code=None, path=None, **kwargs): + def parse_and_get_code(self, code=None, path=None, encoding='utf-8', + use_latest_grammar=False, file_io=None, **kwargs): if self.allow_different_encoding: if code is None: - with open(path, 'rb') as f: - code = f.read() - code = python_bytes_to_unicode(code, errors='replace') + if file_io is None: + file_io = FileIO(path) + code = file_io.read() + code = python_bytes_to_unicode(code, encoding=encoding, errors='replace') - return self.grammar.parse(code=code, path=path, **kwargs), code + grammar = self.latest_grammar if use_latest_grammar else self.grammar + return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code def parse(self, *args, **kwargs): return self.parse_and_get_code(*args, **kwargs)[0] diff --git a/jedi/evaluate/analysis.py b/jedi/evaluate/analysis.py index ded4e9f2..be42e43d 100644 --- a/jedi/evaluate/analysis.py +++ b/jedi/evaluate/analysis.py @@ -5,7 +5,6 @@ from parso.python import tree from jedi._compatibility import force_unicode from jedi import debug -from jedi.evaluate.compiled import CompiledObject from jedi.evaluate.helpers import is_string @@ -86,37 +85,40 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None): # TODO this path is probably not right module_context = node_context.get_root_context() module_path = module_context.py__file__() - instance = typ(error_name, module_path, node.start_pos, message) - debug.warning(str(instance), format=False) - node_context.evaluator.analysis.append(instance) + issue_instance = typ(error_name, module_path, node.start_pos, message) + debug.warning(str(issue_instance), format=False) + node_context.evaluator.analysis.append(issue_instance) + return issue_instance def _check_for_setattr(instance): """ Check if there's any setattr method inside an instance. If so, return True. 
""" - from jedi.evaluate.context import ModuleContext module = instance.get_root_context() - if not isinstance(module, ModuleContext): + node = module.tree_node + if node is None: + # If it's a compiled module or doesn't have a tree_node return False - node = module.tree_node try: - stmts = node.get_used_names()['setattr'] + stmt_names = node.get_used_names()['setattr'] except KeyError: return False - return any(node.start_pos < stmt.start_pos < node.end_pos - for stmt in stmts) + return any(node.start_pos < n.start_pos < node.end_pos + # Check if it's a function called setattr. + and not (n.parent.type == 'funcdef' and n.parent.name == n) + for n in stmt_names) def add_attribute_error(name_context, lookup_context, name): message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name)) - from jedi.evaluate.context.instance import AbstractInstanceContext, CompiledInstanceName + from jedi.evaluate.context.instance import CompiledInstanceName # Check for __getattr__/__getattribute__ existance and issue a warning # instead of an error, if that happens. typ = Error - if isinstance(lookup_context, AbstractInstanceContext): + if lookup_context.is_instance(): slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \ lookup_context.get_function_slot_names(u'__getattribute__') for n in slot_names: @@ -142,11 +144,15 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None) Returns True if the exception was catched. """ def check_match(cls, exception): - try: - return isinstance(cls, CompiledObject) and cls.is_super_class(exception) - except TypeError: + if not cls.is_class(): return False + for python_cls in exception.mro(): + if cls.py__name__() == python_cls.__name__ \ + and cls.parent_context == cls.evaluator.builtins_module: + return True + return False + def check_try_for_except(obj, exception): # Only nodes in try iterator = iter(obj.children) diff --git a/jedi/evaluate/arguments.py b/jedi/evaluate/arguments.py index beab4c8c..2642f487 100644 --- a/jedi/evaluate/arguments.py +++ b/jedi/evaluate/arguments.py @@ -1,14 +1,17 @@ +import re + from parso.python import tree from jedi._compatibility import zip_longest from jedi import debug +from jedi.evaluate.utils import PushBackIterator from jedi.evaluate import analysis from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ LazyTreeContext, get_merged_lazy_context -from jedi.evaluate.filters import ParamName -from jedi.evaluate.base_context import NO_CONTEXTS +from jedi.evaluate.names import ParamName, TreeNameDefinition +from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet, ContextualizedNode from jedi.evaluate.context import iterable -from jedi.evaluate.param import get_params, ExecutedParam +from jedi.evaluate.param import get_executed_params_and_issues, ExecutedParam def try_iter_content(types, depth=0): @@ -28,32 +31,104 @@ def try_iter_content(types, depth=0): try_iter_content(lazy_context.infer(), depth + 1) -class AbstractArguments(object): - context = None - argument_node = None - trailer = None +class ParamIssue(Exception): + pass - def eval_argument_clinic(self, parameters): - """Uses a list with argument clinic information (see PEP 436).""" - iterator = self.unpack() - for i, (name, optional, allow_kwargs) in enumerate(parameters): - key, argument = next(iterator, (None, None)) - if key is not None: - raise NotImplementedError - if argument is None and not optional: - debug.warning('TypeError: %s expected at least %s arguments, got %s', - name, 
len(parameters), i) - raise ValueError - values = NO_CONTEXTS if argument is None else argument.infer() - if not values and not optional: - # For the stdlib we always want values. If we don't get them, - # that's ok, maybe something is too hard to resolve, however, - # we will not proceed with the evaluation of that function. - debug.warning('argument_clinic "%s" not resolvable.', name) - raise ValueError - yield values +def repack_with_argument_clinic(string, keep_arguments_param=False): + """ + Transforms a function or method with arguments to the signature that is + given as an argument clinic notation. + Argument clinic is part of CPython and used for all the functions that are + implemented in C (Python 3.7): + + str.split.__text_signature__ + # Results in: '($self, /, sep=None, maxsplit=-1)' + """ + clinic_args = list(_parse_argument_clinic(string)) + + def decorator(func): + def wrapper(context, *args, **kwargs): + if keep_arguments_param: + arguments = kwargs['arguments'] + else: + arguments = kwargs.pop('arguments') + try: + args += tuple(_iterate_argument_clinic( + context.evaluator, + arguments, + clinic_args + )) + except ParamIssue: + return NO_CONTEXTS + else: + return func(context, *args, **kwargs) + + return wrapper + return decorator + + +def _iterate_argument_clinic(evaluator, arguments, parameters): + """Uses a list with argument clinic information (see PEP 436).""" + iterator = PushBackIterator(arguments.unpack()) + for i, (name, optional, allow_kwargs, stars) in enumerate(parameters): + if stars == 1: + lazy_contexts = [] + for key, argument in iterator: + if key is not None: + iterator.push_back((key, argument)) + break + + lazy_contexts.append(argument) + yield ContextSet([iterable.FakeSequence(evaluator, u'tuple', lazy_contexts)]) + lazy_contexts + continue + elif stars == 2: + raise NotImplementedError() + key, argument = next(iterator, (None, None)) + if key is not None: + debug.warning('Keyword arguments in argument clinic are currently not supported.') + raise ParamIssue + if argument is None and not optional: + debug.warning('TypeError: %s expected at least %s arguments, got %s', + name, len(parameters), i) + raise ParamIssue + + context_set = NO_CONTEXTS if argument is None else argument.infer() + + if not context_set and not optional: + # For the stdlib we always want values. If we don't get them, + # that's ok, maybe something is too hard to resolve, however, + # we will not proceed with the evaluation of that function. + debug.warning('argument_clinic "%s" not resolvable.', name) + raise ParamIssue + yield context_set + + +def _parse_argument_clinic(string): + allow_kwargs = False + optional = False + while string: + # Optional arguments have to begin with a bracket. And should always be + # at the end of the arguments. This is therefore not a proper argument + # clinic implementation. `range()` for example allows an optional start + # value at the beginning. + match = re.match(r'(?:(?:(\[),? 
?|, ?|)(\**\w+)|, ?/)\]*', string) + string = string[len(match.group(0)):] + if not match.group(2): # A slash -> allow named arguments + allow_kwargs = True + continue + optional = optional or bool(match.group(1)) + word = match.group(2) + stars = word.count('*') + word = word[stars:] + yield (word, optional, allow_kwargs, stars) + if stars: + allow_kwargs = True + + +class _AbstractArgumentsMixin(object): def eval_all(self, funcdef=None): """ Evaluates all arguments as a support for static analysis @@ -63,24 +138,60 @@ class AbstractArguments(object): types = lazy_context.infer() try_iter_content(types) - def get_calling_nodes(self): - raise NotImplementedError - def unpack(self, funcdef=None): raise NotImplementedError - def get_params(self, execution_context): - return get_params(execution_context, self) + def get_executed_params_and_issues(self, execution_context): + return get_executed_params_and_issues(execution_context, self) + + def get_calling_nodes(self): + return [] + + +class AbstractArguments(_AbstractArgumentsMixin): + context = None + argument_node = None + trailer = None class AnonymousArguments(AbstractArguments): - def get_params(self, execution_context): + def get_executed_params_and_issues(self, execution_context): from jedi.evaluate.dynamic import search_params return search_params( execution_context.evaluator, execution_context, execution_context.tree_node - ) + ), [] + + def __repr__(self): + return '%s()' % self.__class__.__name__ + + +def unpack_arglist(arglist): + if arglist is None: + return + + # Allow testlist here as well for Python2's class inheritance + # definitions. + if not (arglist.type in ('arglist', 'testlist') or ( + # in python 3.5 **arg is an argument, not arglist + (arglist.type == 'argument') and + arglist.children[0] in ('*', '**'))): + yield 0, arglist + return + + iterator = iter(arglist.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + yield len(child.value), next(iterator) + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child class TreeArguments(AbstractArguments): @@ -97,35 +208,9 @@ class TreeArguments(AbstractArguments): self._evaluator = evaluator self.trailer = trailer # Can be None, e.g. in a class definition. - def _split(self): - if self.argument_node is None: - return - - # Allow testlist here as well for Python2's class inheritance - # definitions. 
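For the short clinic strings jedi actually feeds in for builtins, ``_parse_argument_clinic`` above yields ``(name, optional, allow_kwargs, stars)`` tuples. Hand-traced against the regex (worth re-checking in a REPL)::

    list(_parse_argument_clinic('sequence[, initial]'))
    # -> [('sequence', False, False, 0), ('initial', True, False, 0)]
    list(_parse_argument_clinic('obj, /'))
    # -> [('obj', False, False, 0)]; the trailing slash only sets
    #    allow_kwargs for any parameters that would follow it.

The ``TreeArguments._split`` method being removed here duplicated the star-unpacking logic that now lives in the module-level ``unpack_arglist`` above.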
- if not (self.argument_node.type in ('arglist', 'testlist') or ( - # in python 3.5 **arg is an argument, not arglist - (self.argument_node.type == 'argument') and - self.argument_node.children[0] in ('*', '**'))): - yield 0, self.argument_node - return - - iterator = iter(self.argument_node.children) - for child in iterator: - if child == ',': - continue - elif child in ('*', '**'): - yield len(child.value), next(iterator) - elif child.type == 'argument' and \ - child.children[0] in ('*', '**'): - assert len(child.children) == 2 - yield len(child.children[0].value), child.children[1] - else: - yield 0, child - def unpack(self, funcdef=None): named_args = [] - for star_count, el in self._split(): + for star_count, el in unpack_arglist(self.argument_node): if star_count == 1: arrays = self.context.eval_node(el) iterators = [_iterate_star_args(self.context, a, el, funcdef) @@ -154,24 +239,32 @@ class TreeArguments(AbstractArguments): else: yield None, LazyTreeContext(self.context, el) - # Reordering var_args is necessary, because star args sometimes appear + # Reordering arguments is necessary, because star args sometimes appear # after named argument, but in the actual order it's prepended. for named_arg in named_args: yield named_arg - def as_tree_tuple_objects(self): - for star_count, argument in self._split(): + def _as_tree_tuple_objects(self): + for star_count, argument in unpack_arglist(self.argument_node): if argument.type == 'argument': argument, default = argument.children[::2] else: default = None yield argument, default, star_count + def iter_calling_names_with_star(self): + for name, default, star_count in self._as_tree_tuple_objects(): + # TODO this function is a bit strange. probably refactor? + if not star_count or not isinstance(name, tree.Name): + continue + + yield TreeNameDefinition(self.context, name) + def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.argument_node) def get_calling_nodes(self): - from jedi.evaluate.dynamic import MergedExecutedParams + from jedi.evaluate.dynamic import DynamicExecutedParams old_arguments_list = [] arguments = self @@ -180,17 +273,14 @@ class TreeArguments(AbstractArguments): break old_arguments_list.append(arguments) - for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())): - if not star_count or not isinstance(name, tree.Name): - continue - - names = self._evaluator.goto(arguments.context, name) + for calling_name in reversed(list(arguments.iter_calling_names_with_star())): + names = calling_name.goto() if len(names) != 1: break if not isinstance(names[0], ParamName): break param = names[0].get_param() - if isinstance(param, MergedExecutedParams): + if isinstance(param, DynamicExecutedParams): # For dynamic searches we don't even want to see errors. 
return [] if not isinstance(param, ExecutedParam): @@ -201,9 +291,9 @@ class TreeArguments(AbstractArguments): break if arguments.argument_node is not None: - return [arguments.argument_node] + return [ContextualizedNode(arguments.context, arguments.argument_node)] if arguments.trailer is not None: - return [arguments.trailer] + return [ContextualizedNode(arguments.context, arguments.trailer)] return [] @@ -215,22 +305,47 @@ class ValuesArguments(AbstractArguments): for values in self._values_list: yield None, LazyKnownContexts(values) - def get_calling_nodes(self): - return [] - def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self._values_list) +class TreeArgumentsWrapper(_AbstractArgumentsMixin): + def __init__(self, arguments): + self._wrapped_arguments = arguments + + @property + def context(self): + return self._wrapped_arguments.context + + @property + def argument_node(self): + return self._wrapped_arguments.argument_node + + @property + def trailer(self): + return self._wrapped_arguments.trailer + + def unpack(self, func=None): + raise NotImplementedError + + def get_calling_nodes(self): + return self._wrapped_arguments.get_calling_nodes() + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments) + + def _iterate_star_args(context, array, input_node, funcdef=None): - try: - iter_ = array.py__iter__ - except AttributeError: + if not array.py__getattribute__('__iter__'): if funcdef is not None: # TODO this funcdef should not be needed. m = "TypeError: %s() argument after * must be a sequence, not %s" \ % (funcdef.name.value, array) analysis.add(context, 'type-error-star', input_node, message=m) + try: + iter_ = array.py__iter__ + except AttributeError: + pass else: for lazy_context in iter_(): yield lazy_context diff --git a/jedi/evaluate/base_context.py b/jedi/evaluate/base_context.py index 3f0a198c..5327763d 100644 --- a/jedi/evaluate/base_context.py +++ b/jedi/evaluate/base_context.py @@ -6,143 +6,49 @@ A ContextSet is typically used to specify the return of a function or any other static analysis operation. In jedi there are always multiple returns and not just one. """ +from functools import reduce +from operator import add from parso.python.tree import ExprStmt, CompFor from jedi import debug from jedi._compatibility import Python3Method, zip_longest, unicode -from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature +from jedi.parser_utils import clean_scope_docstring from jedi.common import BaseContextSet, BaseContext -from jedi.evaluate.helpers import EvaluatorIndexError, EvaluatorTypeError, \ - EvaluatorKeyError +from jedi.evaluate.helpers import SimpleGetItemNotFound, execute_evaluated +from jedi.evaluate.utils import safe_property +from jedi.evaluate.cache import evaluator_as_method_param_cache -class Context(BaseContext): - """ - Should be defined, otherwise the API returns empty types. - """ +class HelperContextMixin(object): + def get_root_context(self): + context = self + while True: + if context.parent_context is None: + return context + context = context.parent_context - predefined_names = {} - tree_node = None - """ - To be defined by subclasses. - """ + @classmethod + @evaluator_as_method_param_cache() + def create_cached(cls, *args, **kwargs): + return cls(*args, **kwargs) - @property - def api_type(self): - # By default just lower name of the class. Can and should be - # overwritten. 
- return self.__class__.__name__.lower() - - @debug.increase_indent - def execute(self, arguments): - """ - In contrast to py__call__ this function is always available. - - `hasattr(x, py__call__)` can also be checked to see if a context is - executable. - """ - if self.evaluator.is_analysis: - arguments.eval_all() - - debug.dbg('execute: %s %s', self, arguments) - from jedi.evaluate import stdlib - try: - # Some stdlib functions like super(), namedtuple(), etc. have been - # hard-coded in Jedi to support them. - return stdlib.execute(self.evaluator, self, arguments) - except stdlib.NotInStdLib: - pass - - try: - func = self.py__call__ - except AttributeError: - debug.warning("no execution possible %s", self) - return NO_CONTEXTS - else: - context_set = func(arguments) - debug.dbg('execute result: %s in %s', context_set, self) - return context_set - - return self.evaluator.execute(self, arguments) + def execute(self, arguments=None): + return self.evaluator.execute(self, arguments=arguments) def execute_evaluated(self, *value_list): - """ - Execute a function with already executed arguments. - """ - from jedi.evaluate.arguments import ValuesArguments - arguments = ValuesArguments([ContextSet(value) for value in value_list]) - return self.execute(arguments) + return execute_evaluated(self, *value_list) - def iterate(self, contextualized_node=None, is_async=False): - debug.dbg('iterate %s', self) - try: - if is_async: - iter_method = self.py__aiter__ - else: - iter_method = self.py__iter__ - except AttributeError: - if contextualized_node is not None: - from jedi.evaluate import analysis - analysis.add( - contextualized_node.context, - 'type-error-not-iterable', - contextualized_node.node, - message="TypeError: '%s' object is not iterable" % self) - return iter([]) - else: - return iter_method() + def execute_annotation(self): + return self.execute_evaluated() - def get_item(self, index_contexts, contextualized_node): - from jedi.evaluate.compiled import CompiledObject - from jedi.evaluate.context.iterable import Slice, Sequence - result = ContextSet() + def gather_annotation_classes(self): + return ContextSet([self]) - for index in index_contexts: - if isinstance(index, Slice): - index = index.obj - if isinstance(index, CompiledObject): - try: - index = index.get_safe_value() - except ValueError: - pass - - if type(index) not in (float, int, str, unicode, slice, bytes): - # If the index is not clearly defined, we have to get all the - # possiblities. - if isinstance(self, Sequence) and self.array_type == 'dict': - result |= self.dict_values() - else: - result |= iterate_contexts(ContextSet(self)) - continue - - # The actual getitem call. - try: - getitem = self.py__getitem__ - except AttributeError: - from jedi.evaluate import analysis - # TODO this context is probably not right. - analysis.add( - contextualized_node.context, - 'type-error-not-subscriptable', - contextualized_node.node, - message="TypeError: '%s' object is not subscriptable" % self - ) - else: - try: - result |= getitem(index) - except EvaluatorIndexError: - result |= iterate_contexts(ContextSet(self)) - except EvaluatorKeyError: - # Must be a dict. Lists don't raise KeyErrors. - result |= self.dict_values() - except EvaluatorTypeError: - # The type is wrong and therefore it makes no sense to do - # anything anymore. 
- result = NO_CONTEXTS - return result - - def eval_node(self, node): - return self.evaluator.eval_element(self, node) + def merge_types_of_iterate(self, contextualized_node=None, is_async=False): + return ContextSet.from_sets( + lazy_context.infer() + for lazy_context in self.iterate(contextualized_node, is_async) + ) @Python3Method def py__getattribute__(self, name_or_str, name_context=None, position=None, @@ -161,12 +67,103 @@ class Context(BaseContext): return f.filter_name(filters) return f.find(filters, attribute_lookup=not search_global) + def eval_node(self, node): + return self.evaluator.eval_element(self, node) + def create_context(self, node, node_is_context=False, node_is_object=False): return self.evaluator.create_context(self, node, node_is_context, node_is_object) + def iterate(self, contextualized_node=None, is_async=False): + debug.dbg('iterate %s', self) + if is_async: + from jedi.evaluate.lazy_context import LazyKnownContexts + # TODO if no __aiter__ contexts are there, error should be: + # TypeError: 'async for' requires an object with __aiter__ method, got int + return iter([ + LazyKnownContexts( + self.py__getattribute__('__aiter__').execute_evaluated() + .py__getattribute__('__anext__').execute_evaluated() + .py__getattribute__('__await__').execute_evaluated() + .py__stop_iteration_returns() + ) # noqa + ]) + return self.py__iter__(contextualized_node) + + def is_sub_class_of(self, class_context): + for cls in self.py__mro__(): + if cls.is_same_class(class_context): + return True + return False + + def is_same_class(self, class2): + # Class matching should prefer comparisons that are not this function. + if type(class2).is_same_class != HelperContextMixin.is_same_class: + return class2.is_same_class(self) + return self == class2 + + def is_stub(self): + # The root context knows if it's a stub or not. + return self.parent_context.is_stub() + + +class Context(HelperContextMixin, BaseContext): + """ + Should be defined, otherwise the API returns empty types. + """ + predefined_names = {} + """ + To be defined by subclasses. + """ + tree_node = None + + @property + def api_type(self): + # By default just lower name of the class. Can and should be + # overwritten. + return self.__class__.__name__.lower() + + def py__getitem__(self, index_context_set, contextualized_node): + from jedi.evaluate import analysis + # TODO this context is probably not right. 
+ analysis.add( + contextualized_node.context, + 'type-error-not-subscriptable', + contextualized_node.node, + message="TypeError: '%s' object is not subscriptable" % self + ) + return NO_CONTEXTS + + def py__iter__(self, contextualized_node=None): + if contextualized_node is not None: + from jedi.evaluate import analysis + analysis.add( + contextualized_node.context, + 'type-error-not-iterable', + contextualized_node.node, + message="TypeError: '%s' object is not iterable" % self) + return iter([]) + + def get_signatures(self): + return [] + def is_class(self): return False + def is_instance(self): + return False + + def is_function(self): + return False + + def is_module(self): + return False + + def is_namespace(self): + return False + + def is_compiled(self): + return False + def py__bool__(self): """ Since Wrapper is a super class for classes, functions and modules, @@ -174,16 +171,21 @@ class Context(BaseContext): """ return True - def py__doc__(self, include_call_signature=False): + def py__doc__(self): try: self.tree_node.get_doc_node except AttributeError: return '' else: - if include_call_signature: - return get_doc_with_call_signature(self.tree_node) - else: - return clean_scope_docstring(self.tree_node) + return clean_scope_docstring(self.tree_node) + return None + + def py__stop_iteration_returns(self): + debug.warning("Not possible to return the stop iterations of %s", self) + return NO_CONTEXTS + + def get_qualified_names(self): + # Returns Optional[List[str]] return None @@ -198,10 +200,39 @@ def iterate_contexts(contexts, contextualized_node=None, is_async=False): ) +class ContextWrapper(HelperContextMixin, object): + py__getattribute__ = Context.py__getattribute__ + + def __init__(self, wrapped_context): + self._wrapped_context = wrapped_context + + @safe_property + def name(self): + from jedi.evaluate.names import ContextName + wrapped_name = self._wrapped_context.name + if wrapped_name.tree_name is not None: + return ContextName(self, wrapped_name.tree_name) + else: + from jedi.evaluate.compiled import CompiledContextName + return CompiledContextName(self, wrapped_name.string_name) + + @classmethod + @evaluator_as_method_param_cache() + def create_cached(cls, evaluator, *args, **kwargs): + return cls(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self._wrapped_context, name) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_context) + + class TreeContext(Context): - def __init__(self, evaluator, parent_context=None): + def __init__(self, evaluator, parent_context, tree_node): super(TreeContext, self).__init__(evaluator, parent_context) self.predefined_names = {} + self.tree_node = tree_node def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.tree_node) @@ -218,6 +249,9 @@ class ContextualizedNode(object): def infer(self): return self.context.eval_node(self.node) + def __repr__(self): + return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context) + class ContextualizedName(ContextualizedNode): # TODO merge with TreeNameDefinition?! @@ -235,18 +269,30 @@ class ContextualizedName(ContextualizedNode): x, (y, z) = 2, '' would result in ``[(1, xyz_node), (0, yz_node)]``. 
+ + When searching for b in the case ``a, *b, c = [...]`` it will return:: + + [(slice(1, -1), abc_node)] """ indexes = [] + is_star_expr = False node = self.node.parent compare = self.node while node is not None: if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): for i, child in enumerate(node.children): if child == compare: - indexes.insert(0, (int(i / 2), node)) + index = int(i / 2) + if is_star_expr: + from_end = int((len(node.children) - i) / 2) + index = slice(index, -from_end) + indexes.insert(0, (index, node)) break else: raise LookupError("Couldn't find the assignment.") + is_star_expr = False + elif node.type == 'star_expr': + is_star_expr = True elif isinstance(node, (ExprStmt, CompFor)): break @@ -255,9 +301,51 @@ class ContextualizedName(ContextualizedNode): return indexes +def _getitem(context, index_contexts, contextualized_node): + from jedi.evaluate.context.iterable import Slice + + # The actual getitem call. + simple_getitem = getattr(context, 'py__simple_getitem__', None) + + result = NO_CONTEXTS + unused_contexts = set() + for index_context in index_contexts: + if simple_getitem is not None: + index = index_context + if isinstance(index_context, Slice): + index = index.obj + + try: + method = index.get_safe_value + except AttributeError: + pass + else: + index = method(default=None) + + if type(index) in (float, int, str, unicode, slice, bytes): + try: + result |= simple_getitem(index) + continue + except SimpleGetItemNotFound: + pass + + unused_contexts.add(index_context) + + # The index was somehow not good enough or simply a wrong type. + # Therefore we now iterate through all the contexts and just take + # all results. + if unused_contexts or not index_contexts: + result |= context.py__getitem__( + ContextSet(unused_contexts), + contextualized_node + ) + debug.dbg('py__getitem__ result: %s', result) + return result + + class ContextSet(BaseContextSet): def py__class__(self): - return ContextSet.from_iterable(c.py__class__() for c in self._set) + return ContextSet(c.py__class__() for c in self._set) def iterate(self, contextualized_node=None, is_async=False): from jedi.evaluate.lazy_context import get_merged_lazy_context @@ -267,12 +355,43 @@ class ContextSet(BaseContextSet): [l for l in lazy_contexts if l is not None] ) + def execute(self, arguments): + return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set) -NO_CONTEXTS = ContextSet() + def execute_evaluated(self, *args, **kwargs): + return ContextSet.from_sets(execute_evaluated(c, *args, **kwargs) for c in self._set) + + def py__getattribute__(self, *args, **kwargs): + if kwargs.get('is_goto'): + return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], []) + return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) + + def get_item(self, *args, **kwargs): + return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) + + def try_merge(self, function_name): + context_set = self.__class__([]) + for c in self._set: + try: + method = getattr(c, function_name) + except AttributeError: + pass + else: + context_set |= method() + return context_set + + def gather_annotation_classes(self): + return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set]) + + def get_signatures(self): + return [sig for c in self._set for sig in c.get_signatures()] + + +NO_CONTEXTS = ContextSet([]) def iterator_to_context_set(func): def wrapper(*args, **kwargs): - return 
ContextSet.from_iterable(func(*args, **kwargs)) + return ContextSet(func(*args, **kwargs)) return wrapper diff --git a/jedi/evaluate/cache.py b/jedi/evaluate/cache.py index c619e698..e865a0dc 100644 --- a/jedi/evaluate/cache.py +++ b/jedi/evaluate/cache.py @@ -4,7 +4,10 @@ - ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. """ +from jedi import debug + _NO_DEFAULT = object() +_RECURSION_SENTINEL = object() def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): @@ -28,8 +31,7 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a try: memo = cache[function] except KeyError: - memo = {} - cache[function] = memo + cache[function] = memo = {} key = (obj, args, frozenset(kwargs.items())) if key in memo: @@ -75,3 +77,47 @@ class CachedMetaClass(type): @evaluator_as_method_param_cache() def __call__(self, *args, **kwargs): return super(CachedMetaClass, self).__call__(*args, **kwargs) + + +def evaluator_method_generator_cache(): + """ + This is a special memoizer. It memoizes generators and also checks for + recursion errors and returns no further iterator elements in that case. + """ + def func(function): + def wrapper(obj, *args, **kwargs): + cache = obj.evaluator.memoize_cache + try: + memo = cache[function] + except KeyError: + cache[function] = memo = {} + + key = (obj, args, frozenset(kwargs.items())) + + if key in memo: + actual_generator, cached_lst = memo[key] + else: + actual_generator = function(obj, *args, **kwargs) + cached_lst = [] + memo[key] = actual_generator, cached_lst + + i = 0 + while True: + try: + next_element = cached_lst[i] + if next_element is _RECURSION_SENTINEL: + debug.warning('Found a generator recursion for %s' % obj) + # This means we have hit a recursion.
+ return + except IndexError: + cached_lst.append(_RECURSION_SENTINEL) + next_element = next(actual_generator, None) + if next_element is None: + cached_lst.pop() + return + cached_lst[-1] = next_element + yield next_element + i += 1 + return wrapper + + return func diff --git a/jedi/evaluate/compiled/__init__.py b/jedi/evaluate/compiled/__init__.py index 357d26cc..6244f146 100644 --- a/jedi/evaluate/compiled/__init__.py +++ b/jedi/evaluate/compiled/__init__.py @@ -1,12 +1,35 @@ from jedi._compatibility import unicode from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \ - CompiledObjectFilter, CompiledContextName, create_from_access_path, \ - create_from_name + CompiledObjectFilter, CompiledContextName, create_from_access_path +from jedi.evaluate.base_context import ContextWrapper +from jedi.evaluate.helpers import execute_evaluated def builtin_from_name(evaluator, string): - builtins = evaluator.builtins_module - return create_from_name(evaluator, builtins, string) + typing_builtins_module = evaluator.builtins_module + if string in ('None', 'True', 'False'): + builtins, = typing_builtins_module.non_stub_context_set + filter_ = next(builtins.get_filters()) + else: + filter_ = next(typing_builtins_module.get_filters()) + name, = filter_.get(string) + context, = name.infer() + return context + + +class CompiledValue(ContextWrapper): + def __init__(self, instance, compiled_obj): + super(CompiledValue, self).__init__(instance) + self._compiled_obj = compiled_obj + + def __getattribute__(self, name): + if name in ('get_safe_value', 'execute_operation', 'access_handle', + 'negate', 'py__bool__', 'is_compiled'): + return getattr(self._compiled_obj, name) + return super(CompiledValue, self).__getattribute__(name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._compiled_obj) def create_simple_object(evaluator, obj): @@ -14,26 +37,25 @@ def create_simple_object(evaluator, obj): Only allows creations of objects that are easily picklable across Python versions. """ - assert isinstance(obj, (int, float, str, bytes, unicode, slice, complex)) - return create_from_access_path( + assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj + compiled_obj = create_from_access_path( evaluator, evaluator.compiled_subprocess.create_simple_object(obj) ) - - -def get_special_object(evaluator, identifier): - return create_from_access_path( - evaluator, - evaluator.compiled_subprocess.get_special_object(identifier) - ) + instance, = builtin_from_name(evaluator, compiled_obj.name.string_name).execute() + return CompiledValue(instance, compiled_obj) def get_string_context_set(evaluator): - return builtin_from_name(evaluator, u'str').execute_evaluated() + return execute_evaluated(builtin_from_name(evaluator, u'str')) -def load_module(evaluator, **kwargs): - access_path = evaluator.compiled_subprocess.load_module(**kwargs) +def load_module(evaluator, dotted_name, **kwargs): + # Temporary, some tensorflow builtins cannot be loaded, so it's tried again + # and again and it's really slow. 
+ if dotted_name.startswith('tensorflow.'): + return None + access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs) if access_path is None: return None return create_from_access_path(evaluator, access_path) diff --git a/jedi/evaluate/compiled/access.py b/jedi/evaluate/compiled/access.py index 97a8dd0b..4e704947 100644 --- a/jedi/evaluate/compiled/access.py +++ b/jedi/evaluate/compiled/access.py @@ -1,15 +1,13 @@ +from __future__ import print_function import inspect import types import sys -from textwrap import dedent import operator as op from collections import namedtuple -from jedi import debug -from jedi._compatibility import unicode, is_py3, is_py34, builtins, \ - py_version, force_unicode, print_to_stderr +from jedi._compatibility import unicode, is_py3, builtins, \ + py_version, force_unicode from jedi.evaluate.compiled.getattr_static import getattr_static -from jedi.evaluate.utils import dotted_from_fs_path MethodDescriptorType = type(str.replace) @@ -33,10 +31,9 @@ NOT_CLASS_TYPES = ( if is_py3: NOT_CLASS_TYPES += ( types.MappingProxyType, - types.SimpleNamespace + types.SimpleNamespace, + types.DynamicClassAttribute, ) - if is_py34: - NOT_CLASS_TYPES += (types.DynamicClassAttribute,) # Those types don't exist in typing. @@ -46,12 +43,6 @@ WrapperDescriptorType = type(set.__iter__) object_class_dict = type.__dict__["__dict__"].__get__(object) ClassMethodDescriptorType = type(object_class_dict['__subclasshook__']) -def _a_generator(foo): - """Used to have an object to return for generators.""" - yield 42 - yield foo - - _sentinel = object() # Maps Python syntax to the operator module. @@ -137,33 +128,26 @@ def create_access(evaluator, obj): return evaluator.compiled_subprocess.get_or_create_access_handle(obj) -def load_module(evaluator, path=None, name=None, sys_path=None): - if sys_path is None: - sys_path = list(evaluator.get_sys_path()) - if path is not None: - dotted_path = dotted_from_fs_path(path, sys_path=sys_path) - else: - dotted_path = name - +def load_module(evaluator, dotted_name, sys_path): temp, sys.path = sys.path, sys_path try: - __import__(dotted_path) + __import__(dotted_name) except ImportError: # If a module is "corrupt" or not really a Python module or whatever. - debug.warning('Module %s not importable in path %s.', dotted_path, path) + print('Module %s not importable in path %s.' % (dotted_name, sys_path), file=sys.stderr) return None except Exception: # Since __import__ pretty much makes code execution possible, just # catch any error here and print it. import traceback - print_to_stderr("Cannot import:\n%s" % traceback.format_exc()) + print("Cannot import:\n%s" % traceback.format_exc(), file=sys.stderr) return None finally: sys.path = temp # Just access the cache after import, because of #59 as well as the very # complicated import structure of Python. 
- module = sys.modules[dotted_path] + module = sys.modules[dotted_name] return create_access_path(evaluator, module) @@ -213,7 +197,7 @@ class DirectObjectAccess(object): except AttributeError: return None - def py__doc__(self, include_call_signature=False): + def py__doc__(self): return force_unicode(inspect.getdoc(self._obj)) or u'' def py__name__(self): @@ -236,7 +220,12 @@ class DirectObjectAccess(object): def py__mro__accesses(self): return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:]) - def py__getitem__(self, index): + def py__getitem__all_values(self): + if isinstance(self._obj, dict): + return [self._create_access_path(v) for v in self._obj.values()] + return self.py__iter__list() + + def py__simple_getitem__(self, index): if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. return None @@ -244,6 +233,9 @@ class DirectObjectAccess(object): return self._create_access_path(self._obj[index]) def py__iter__list(self): + if not hasattr(self._obj, '__getitem__'): + return None + if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. return [] @@ -262,6 +254,9 @@ class DirectObjectAccess(object): def py__bases__(self): return [self._create_access_path(base) for base in self._obj.__bases__] + def py__path__(self): + return self._obj.__path__ + @_force_unicode_decorator def get_repr(self): builtins = 'builtins', '__builtin__' @@ -366,7 +361,6 @@ class DirectObjectAccess(object): yield builtins else: try: - # TODO use sys.modules, __module__ can be faked. yield sys.modules[imp_plz] except KeyError: # __module__ can be something arbitrary that doesn't exist. 
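A rule that shows up throughout ``DirectObjectAccess`` above: never invoke a user-defined ``__getitem__`` or ``__iter__`` while introspecting, since that could execute arbitrary code inside the subprocess. Only plain builtin containers are ever indexed directly; everything else is answered with ``None`` or an empty list and left to the tree evaluator. The core of that check, as a standalone sketch::

    SAFE_CONTAINER_TYPES = (str, list, tuple, bytes, bytearray, dict)  # plus unicode on Python 2

    def simple_getitem(obj, index):
        # Refuse anything that might run a custom __getitem__; the caller
        # falls back to static analysis in that case.
        if type(obj) not in SAFE_CONTAINER_TYPES:
            return None
        return obj[index]

    simple_getitem([10, 20, 30], 1)  # -> 20
    simple_getitem(object(), 1)      # -> None, no user code was run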
@@ -423,12 +417,6 @@ class DirectObjectAccess(object): def negate(self): return self._create_access_path(-self._obj) - def dict_values(self): - return [self._create_access_path(v) for v in self._obj.values()] - - def is_super_class(self, exception): - return issubclass(exception, self._obj) - def get_dir_infos(self): """ Used to return a couple of infos that are needed when accessing the sub @@ -450,41 +438,3 @@ def _is_class_instance(obj): return False else: return cls != type and not issubclass(cls, NOT_CLASS_TYPES) - - -if py_version >= 35: - exec(compile(dedent(""" - async def _coroutine(): pass - _coroutine = _coroutine() - CoroutineType = type(_coroutine) - _coroutine.close() # Prevent ResourceWarning - """), 'blub', 'exec')) - _coroutine_wrapper = _coroutine.__await__() -else: - _coroutine = None - _coroutine_wrapper = None - -if py_version >= 36: - exec(compile(dedent(""" - async def _async_generator(): - yield - _async_generator = _async_generator() - AsyncGeneratorType = type(_async_generator) - """), 'blub', 'exec')) -else: - _async_generator = None - -class _SPECIAL_OBJECTS(object): - FUNCTION_CLASS = types.FunctionType - METHOD_CLASS = type(DirectObjectAccess.py__bool__) - MODULE_CLASS = types.ModuleType - GENERATOR_OBJECT = _a_generator(1.0) - BUILTINS = builtins - COROUTINE = _coroutine - COROUTINE_WRAPPER = _coroutine_wrapper - ASYNC_GENERATOR = _async_generator - - -def get_special_object(evaluator, identifier): - obj = getattr(_SPECIAL_OBJECTS, identifier) - return create_access_path(evaluator, obj) diff --git a/jedi/evaluate/compiled/context.py b/jedi/evaluate/compiled/context.py index a51717f0..36f1936b 100644 --- a/jedi/evaluate/compiled/context.py +++ b/jedi/evaluate/compiled/context.py @@ -5,92 +5,110 @@ import re from functools import partial from jedi import debug -from jedi._compatibility import force_unicode, Parameter +from jedi._compatibility import force_unicode, Parameter, cast_path from jedi.cache import underscore_memoization, memoize_method -from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \ - ContextNameMixin -from jedi.evaluate.base_context import Context, ContextSet +from jedi.evaluate.filters import AbstractFilter +from jedi.evaluate.names import AbstractNameDefinition, ContextNameMixin +from jedi.evaluate.base_context import Context, ContextSet, NO_CONTEXTS from jedi.evaluate.lazy_context import LazyKnownContext from jedi.evaluate.compiled.access import _sentinel from jedi.evaluate.cache import evaluator_function_cache -from jedi.evaluate.helpers import reraise_as_evaluator -from . import fake +from jedi.evaluate.helpers import reraise_getitem_errors, execute_evaluated +from jedi.evaluate.signature import BuiltinSignature class CheckAttribute(object): """Raises an AttributeError if the attribute X isn't available.""" - def __init__(self, func): - self.func = func + def __init__(self, check_name=None): # Remove the py in front of e.g. py__call__. - self.check_name = force_unicode(func.__name__[2:]) + self.check_name = check_name + + def __call__(self, func): + self.func = func + if self.check_name is None: + self.check_name = force_unicode(func.__name__[2:]) + return self def __get__(self, instance, owner): if instance is None: return self # This might raise an AttributeError. That's wanted. - if self.check_name == '__iter__': - # Python iterators are a bit strange, because there's no need for - # the __iter__ function as long as __getitem__ is defined (it will - # just start with __getitem__(0). 
This is especially true for - # Python 2 strings, where `str.__iter__` is not even defined. - if not instance.access_handle.has_iter(): - raise AttributeError - else: - instance.access_handle.getattr(self.check_name) + instance.access_handle.getattr(self.check_name) return partial(self.func, instance) class CompiledObject(Context): - def __init__(self, evaluator, access_handle, parent_context=None, faked_class=None): + def __init__(self, evaluator, access_handle, parent_context=None): super(CompiledObject, self).__init__(evaluator, parent_context) self.access_handle = access_handle - # This attribute will not be set for most classes, except for fakes. - self.tree_node = faked_class - @CheckAttribute - def py__call__(self, params): + @CheckAttribute() + def py__call__(self, arguments): if self.tree_node is not None and self.tree_node.type == 'funcdef': from jedi.evaluate.context.function import FunctionContext return FunctionContext( self.evaluator, parent_context=self.parent_context, - funcdef=self.tree_node - ).py__call__(params) + tree_node=self.tree_node + ).py__call__(arguments=arguments) if self.access_handle.is_class(): from jedi.evaluate.context import CompiledInstance - return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params)) + return ContextSet([ + CompiledInstance(self.evaluator, self.parent_context, self, arguments) + ]) else: - return ContextSet.from_iterable(self._execute_function(params)) + return ContextSet(self._execute_function(arguments)) - @CheckAttribute + @CheckAttribute() def py__class__(self): return create_from_access_path(self.evaluator, self.access_handle.py__class__()) - @CheckAttribute + @CheckAttribute() def py__mro__(self): return (self,) + tuple( create_from_access_path(self.evaluator, access) for access in self.access_handle.py__mro__accesses() ) - @CheckAttribute + @CheckAttribute() def py__bases__(self): return tuple( create_from_access_path(self.evaluator, access) for access in self.access_handle.py__bases__() ) + @CheckAttribute() + def py__path__(self): + return map(cast_path, self.access_handle.py__path__()) + + @property + def string_names(self): + # For modules + name = self.py__name__() + if name is None: + return [] + return tuple(name.split('.')) + + def get_qualified_names(self): + return self.string_names + def py__bool__(self): return self.access_handle.py__bool__() def py__file__(self): - return self.access_handle.py__file__() + return cast_path(self.access_handle.py__file__()) def is_class(self): return self.access_handle.is_class() - def py__doc__(self, include_call_signature=False): + def is_compiled(self): + return True + + def is_stub(self): + return False + + def py__doc__(self): return self.access_handle.py__doc__() def get_param_names(self): @@ -108,6 +126,9 @@ class CompiledObject(Context): for signature_param in signature_params: yield SignatureParamName(self, signature_param) + def get_signatures(self): + return [BuiltinSignature(self)] + def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr()) @@ -144,18 +165,41 @@ class CompiledObject(Context): """ return CompiledObjectFilter(self.evaluator, self, is_instance) - @CheckAttribute - def py__getitem__(self, index): - with reraise_as_evaluator(IndexError, KeyError, TypeError): - access = self.access_handle.py__getitem__(index) + @CheckAttribute(u'__getitem__') + def py__simple_getitem__(self, index): + with reraise_getitem_errors(IndexError, KeyError, TypeError): + access = 
self.access_handle.py__simple_getitem__(index) if access is None: - return ContextSet() + return NO_CONTEXTS - return ContextSet(create_from_access_path(self.evaluator, access)) + return ContextSet([create_from_access_path(self.evaluator, access)]) - @CheckAttribute - def py__iter__(self): - for access in self.access_handle.py__iter__list(): + def py__getitem__(self, index_context_set, contextualized_node): + all_access_paths = self.access_handle.py__getitem__all_values() + if all_access_paths is None: + # This means basically that no __getitem__ has been defined on this + # object. + return super(CompiledObject, self).py__getitem__(index_context_set, contextualized_node) + return ContextSet( + create_from_access_path(self.evaluator, access) + for access in all_access_paths + ) + + def py__iter__(self, contextualized_node=None): + # Python iterators are a bit strange, because there's no need for + # the __iter__ function as long as __getitem__ is defined (it will + # just start with __getitem__(0). This is especially true for + # Python 2 strings, where `str.__iter__` is not even defined. + if not self.access_handle.has_iter(): + for x in super(CompiledObject, self).py__iter__(contextualized_node): + yield x + + access_path_list = self.access_handle.py__iter__list() + if access_path_list is None: + # There is no __iter__ method on this object. + return + + for access in access_path_list: yield LazyKnownContext(create_from_access_path(self.evaluator, access)) def py__name__(self): @@ -183,17 +227,11 @@ class CompiledObject(Context): continue else: bltn_obj = builtin_from_name(self.evaluator, name) - for result in bltn_obj.execute(params): + for result in self.evaluator.execute(bltn_obj, params): yield result for type_ in docstrings.infer_return_types(self): yield type_ - def dict_values(self): - return ContextSet.from_iterable( - create_from_access_path(self.evaluator, access) - for access in self.access_handle.dict_values() - ) - def get_safe_value(self, default=_sentinel): try: return self.access_handle.get_safe_value() @@ -211,9 +249,6 @@ class CompiledObject(Context): def negate(self): return create_from_access_path(self.evaluator, self.access_handle.negate()) - def is_super_class(self, exception): - return self.access_handle.is_super_class(exception) - class CompiledName(AbstractNameDefinition): def __init__(self, evaluator, parent_context, name): @@ -234,9 +269,9 @@ class CompiledName(AbstractNameDefinition): @underscore_memoization def infer(self): - return ContextSet(create_from_name( + return ContextSet([_create_from_name( self._evaluator, self.parent_context, self.string_name - )) + )]) class SignatureParamName(AbstractNameDefinition): @@ -259,12 +294,12 @@ class SignatureParamName(AbstractNameDefinition): def infer(self): p = self._signature_param evaluator = self.parent_context.evaluator - contexts = ContextSet() + contexts = NO_CONTEXTS if p.has_default: - contexts = ContextSet(create_from_access_path(evaluator, p.default)) + contexts = ContextSet([create_from_access_path(evaluator, p.default)]) if p.has_annotation: annotation = create_from_access_path(evaluator, p.annotation) - contexts |= annotation.execute_evaluated() + contexts |= execute_evaluated(annotation) return contexts @@ -279,7 +314,7 @@ class UnresolvableParamName(AbstractNameDefinition): return Parameter.POSITIONAL_ONLY def infer(self): - return ContextSet() + return NO_CONTEXTS class CompiledContextName(ContextNameMixin, AbstractNameDefinition): @@ -300,7 +335,7 @@ class EmptyCompiledName(AbstractNameDefinition): 
self.string_name = name def infer(self): - return ContextSet() + return NO_CONTEXTS class CompiledObjectFilter(AbstractFilter): @@ -309,7 +344,7 @@ class CompiledObjectFilter(AbstractFilter): def __init__(self, evaluator, compiled_object, is_instance=False): self._evaluator = evaluator self._compiled_object = compiled_object - self._is_instance = is_instance + self.is_instance = is_instance def get(self, name): return self._get( @@ -333,7 +368,7 @@ class CompiledObjectFilter(AbstractFilter): if is_descriptor or not has_attribute: return [self._get_cached_name(name, is_empty=True)] - if self._is_instance and name not in dir_callback(): + if self.is_instance and name not in dir_callback(): return [] return [self._get_cached_name(name)] @@ -356,7 +391,7 @@ class CompiledObjectFilter(AbstractFilter): ) # ``dir`` doesn't include the type names. - if not self._is_instance and needs_type_completions: + if not self.is_instance and needs_type_completions: for filter in builtin_from_name(self._evaluator, u'type').get_filters(): names += filter.values() return names @@ -364,6 +399,9 @@ class CompiledObjectFilter(AbstractFilter): def _create_name(self, name): return self.name_class(self._evaluator, self._compiled_object, name) + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self._compiled_object) + docstr_defaults = { 'floating point number': u'float', @@ -435,42 +473,31 @@ def _parse_function_doc(doc): return param_str, ret -def create_from_name(evaluator, compiled_object, name): - faked = None - try: - faked = fake.get_faked_with_parent_context(compiled_object, name) - except fake.FakeDoesNotExist: - pass - +def _create_from_name(evaluator, compiled_object, name): access = compiled_object.access_handle.getattr(name, default=None) + parent_context = compiled_object + if parent_context.is_class(): + parent_context = parent_context.parent_context return create_cached_compiled_object( - evaluator, access, parent_context=compiled_object, faked=faked + evaluator, access, parent_context=parent_context ) def _normalize_create_args(func): """The cache doesn't care about keyword vs. normal args.""" - def wrapper(evaluator, obj, parent_context=None, faked=None): - return func(evaluator, obj, parent_context, faked) + def wrapper(evaluator, obj, parent_context=None): + return func(evaluator, obj, parent_context) return wrapper def create_from_access_path(evaluator, access_path): parent_context = None for name, access in access_path.accesses: - try: - if parent_context is None: - faked = fake.get_faked_module(evaluator, access_path.accesses[0][0]) - else: - faked = fake.get_faked_with_parent_context(parent_context, name) - except fake.FakeDoesNotExist: - faked = None - - parent_context = create_cached_compiled_object(evaluator, access, parent_context, faked) + parent_context = create_cached_compiled_object(evaluator, access, parent_context) return parent_context @_normalize_create_args @evaluator_function_cache() -def create_cached_compiled_object(evaluator, access_handle, parent_context, faked): - return CompiledObject(evaluator, access_handle, parent_context, faked) +def create_cached_compiled_object(evaluator, access_handle, parent_context): + return CompiledObject(evaluator, access_handle, parent_context) diff --git a/jedi/evaluate/compiled/fake.py b/jedi/evaluate/compiled/fake.py deleted file mode 100644 index ac43ea28..00000000 --- a/jedi/evaluate/compiled/fake.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Loads functions that are mixed in to the standard library. E.g. 
builtins are -written in C (binaries), but my autocompletion only understands Python code. By -mixing in Python code, the autocompletion should work much better for builtins. -""" - -import os -from itertools import chain - -from jedi._compatibility import unicode - -fake_modules = {} - - -def _get_path_dict(): - path = os.path.dirname(os.path.abspath(__file__)) - base_path = os.path.join(path, 'fake') - dct = {} - for file_name in os.listdir(base_path): - if file_name.endswith('.pym'): - dct[file_name[:-4]] = os.path.join(base_path, file_name) - return dct - - -_path_dict = _get_path_dict() - - -class FakeDoesNotExist(Exception): - pass - - -def _load_faked_module(evaluator, module_name): - try: - return fake_modules[module_name] - except KeyError: - pass - - check_module_name = module_name - if module_name == '__builtin__' and evaluator.environment.version_info.major == 2: - check_module_name = 'builtins' - - try: - path = _path_dict[check_module_name] - except KeyError: - fake_modules[module_name] = None - return - - with open(path) as f: - source = f.read() - - fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source)) - - if check_module_name != module_name: - # There are two implementations of `open` for either python 2/3. - # -> Rename the python2 version (`look at fake/builtins.pym`). - open_func = _search_scope(m, 'open') - open_func.children[1].value = 'open_python3' - open_func = _search_scope(m, 'open_python2') - open_func.children[1].value = 'open' - return m - - -def _search_scope(scope, obj_name): - for s in chain(scope.iter_classdefs(), scope.iter_funcdefs()): - if s.name.value == obj_name: - return s - - -def get_faked_with_parent_context(parent_context, name): - if parent_context.tree_node is not None: - # Try to search in already clearly defined stuff. 
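For context, scope search in this removed fake layer worked on parser trees: parse the fake source, then scan the module's class and function definitions for a matching name. Roughly like the following sketch, which uses parso directly, whereas the deleted code went through evaluator.latest_grammar:

    import parso

    module = parso.parse('class partial:\n    def __call__(self): pass\n')
    found = None
    for scope in module.iter_classdefs():
        if scope.name.value == 'partial':
            found = scope
    print(found.type)  # 'classdef'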
- found = _search_scope(parent_context.tree_node, name) - if found is not None: - return found - raise FakeDoesNotExist - - -def get_faked_module(evaluator, string_name): - module = _load_faked_module(evaluator, string_name) - if module is None: - raise FakeDoesNotExist - return module diff --git a/jedi/evaluate/compiled/fake/_functools.pym b/jedi/evaluate/compiled/fake/_functools.pym deleted file mode 100644 index 909ef03f..00000000 --- a/jedi/evaluate/compiled/fake/_functools.pym +++ /dev/null @@ -1,9 +0,0 @@ -class partial(): - def __init__(self, func, *args, **keywords): - self.__func = func - self.__args = args - self.__keywords = keywords - - def __call__(self, *args, **kwargs): - # TODO should be **dict(self.__keywords, **kwargs) - return self.__func(*(self.__args + args), **self.__keywords) diff --git a/jedi/evaluate/compiled/fake/_sqlite3.pym b/jedi/evaluate/compiled/fake/_sqlite3.pym deleted file mode 100644 index 2151e652..00000000 --- a/jedi/evaluate/compiled/fake/_sqlite3.pym +++ /dev/null @@ -1,26 +0,0 @@ -def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): - return Connection() - - -class Connection(): - def cursor(self): - return Cursor() - - -class Cursor(): - def cursor(self): - return Cursor() - - def fetchone(self): - return Row() - - def fetchmany(self, size=cursor.arraysize): - return [self.fetchone()] - - def fetchall(self): - return [self.fetchone()] - - -class Row(): - def keys(self): - return [''] diff --git a/jedi/evaluate/compiled/fake/_sre.pym b/jedi/evaluate/compiled/fake/_sre.pym deleted file mode 100644 index 217be563..00000000 --- a/jedi/evaluate/compiled/fake/_sre.pym +++ /dev/null @@ -1,99 +0,0 @@ -def compile(): - class SRE_Match(): - endpos = int() - lastgroup = int() - lastindex = int() - pos = int() - string = str() - regs = ((int(), int()),) - - def __init__(self, pattern): - self.re = pattern - - def start(self): - return int() - - def end(self): - return int() - - def span(self): - return int(), int() - - def expand(self): - return str() - - def group(self, nr): - return str() - - def groupdict(self): - return {str(): str()} - - def groups(self): - return (str(),) - - class SRE_Pattern(): - flags = int() - groupindex = {} - groups = int() - pattern = str() - - def findall(self, string, pos=None, endpos=None): - """ - findall(string[, pos[, endpos]]) --> list. - Return a list of all non-overlapping matches of pattern in string. - """ - return [str()] - - def finditer(self, string, pos=None, endpos=None): - """ - finditer(string[, pos[, endpos]]) --> iterator. - Return an iterator over all non-overlapping matches for the - RE pattern in string. For each match, the iterator returns a - match object. - """ - yield SRE_Match(self) - - def match(self, string, pos=None, endpos=None): - """ - match(string[, pos[, endpos]]) --> match object or None. - Matches zero or more characters at the beginning of the string - pattern - """ - return SRE_Match(self) - - def scanner(self, string, pos=None, endpos=None): - pass - - def search(self, string, pos=None, endpos=None): - """ - search(string[, pos[, endpos]]) --> match object or None. - Scan through string looking for a match, and return a corresponding - MatchObject instance. Return None if no position in the string matches. - """ - return SRE_Match(self) - - def split(self, string, maxsplit=0]): - """ - split(string[, maxsplit = 0]) --> list. - Split string by the occurrences of pattern. 
- """ - return [str()] - - def sub(self, repl, string, count=0): - """ - sub(repl, string[, count = 0]) --> newstring - Return the string obtained by replacing the leftmost non-overlapping - occurrences of pattern in string by the replacement repl. - """ - return str() - - def subn(self, repl, string, count=0): - """ - subn(repl, string[, count = 0]) --> (newstring, number of subs) - Return the tuple (new_string, number_of_subs_made) found by replacing - the leftmost non-overlapping occurrences of pattern with the - replacement repl. - """ - return (str(), int()) - - return SRE_Pattern() diff --git a/jedi/evaluate/compiled/fake/_weakref.pym b/jedi/evaluate/compiled/fake/_weakref.pym deleted file mode 100644 index 298d0b0d..00000000 --- a/jedi/evaluate/compiled/fake/_weakref.pym +++ /dev/null @@ -1,9 +0,0 @@ -def proxy(object, callback=None): - return object - -class ref(): - def __init__(self, object, callback=None): - self.__object = object - - def __call__(self): - return self.__object diff --git a/jedi/evaluate/compiled/fake/builtins.pym b/jedi/evaluate/compiled/fake/builtins.pym deleted file mode 100644 index 46ec619f..00000000 --- a/jedi/evaluate/compiled/fake/builtins.pym +++ /dev/null @@ -1,277 +0,0 @@ -""" -Pure Python implementation of some builtins. -This code is not going to be executed anywhere. -These implementations are not always correct, but should work as good as -possible for the auto completion. -""" - - -def next(iterator, default=None): - if random.choice([0, 1]): - if hasattr("next"): - return iterator.next() - else: - return iterator.__next__() - else: - if default is not None: - return default - - -def iter(collection, sentinel=None): - if sentinel: - yield collection() - else: - for c in collection: - yield c - - -def range(start, stop=None, step=1): - return [0] - - -class file(): - def __iter__(self): - yield '' - - def next(self): - return '' - - def readlines(self): - return [''] - - def __enter__(self): - return self - - -class xrange(): - # Attention: this function doesn't exist in Py3k (there it is range). 
- def __iter__(self): - yield 1 - - def count(self): - return 1 - - def index(self): - return 1 - - -def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): - import io - return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) - - -def open_python2(name, mode=None, buffering=None): - return file(name, mode, buffering) - - -#-------------------------------------------------------- -# descriptors -#-------------------------------------------------------- -class property(): - def __init__(self, fget, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - self.__doc__ = doc - - def __get__(self, obj, cls): - return self.fget(obj) - - def __set__(self, obj, value): - self.fset(obj, value) - - def __delete__(self, obj): - self.fdel(obj) - - def setter(self, func): - self.fset = func - return self - - def getter(self, func): - self.fget = func - return self - - def deleter(self, func): - self.fdel = func - return self - - -class staticmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - return self.__func - - -class classmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - def _method(*args, **kwargs): - return self.__func(cls, *args, **kwargs) - return _method - - -#-------------------------------------------------------- -# array stuff -#-------------------------------------------------------- -class list(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def pop(self): - return self.__iterable[int()] - - -class tuple(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def index(self): - return 1 - - def count(self): - return 1 - - -class set(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def pop(self): - return list(self.__iterable)[-1] - - def copy(self): - return self - - def difference(self, other): - return self - other - - def intersection(self, other): - return self & other - - def symmetric_difference(self, other): - return self ^ other - - def union(self, other): - return self | other - - -class frozenset(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def copy(self): - return self - - -class dict(): - def __init__(self, **elements): - self.__elements = elements - - def clear(self): - # has a strange docstr - pass - - def __getitem__(self, obj): - return self.__elements[obj] - - def get(self, k, d=None): - # TODO implement - try: - return self.__elements[k] - pass - except KeyError: - return d - - def values(self): - return self.__elements.values() - - def setdefault(self, k, d): - # TODO maybe also return the content - return d - - -class enumerate(): - def __init__(self, sequence, start=0): - self.__sequence = sequence - - def __iter__(self): - for i in self.__sequence: - yield 1, i - - def __next__(self): - return next(self.__iter__()) - - def next(self): - return next(self.__iter__()) - - -class reversed(): - def __init__(self, sequence): - self.__sequence = sequence - - def 
__iter__(self): - for i in self.__sequence: - yield i - - def __next__(self): - return next(self.__iter__()) - - def next(self): - return next(self.__iter__()) - - -def sorted(iterable, cmp=None, key=None, reverse=False): - return iterable - - -#-------------------------------------------------------- -# basic types -#-------------------------------------------------------- -class int(): - def __init__(self, x, base=None): - pass - - -class str(): - def __init__(self, obj): - pass - - def strip(self): - return str() - - def split(self): - return [str()] - -class type(): - def mro(): - return [object] diff --git a/jedi/evaluate/compiled/fake/datetime.pym b/jedi/evaluate/compiled/fake/datetime.pym deleted file mode 100644 index 823ac5b7..00000000 --- a/jedi/evaluate/compiled/fake/datetime.pym +++ /dev/null @@ -1,4 +0,0 @@ -class datetime(): - @staticmethod - def now(): - return datetime() diff --git a/jedi/evaluate/compiled/fake/io.pym b/jedi/evaluate/compiled/fake/io.pym deleted file mode 100644 index c1f4fc01..00000000 --- a/jedi/evaluate/compiled/fake/io.pym +++ /dev/null @@ -1,12 +0,0 @@ -class TextIOWrapper(): - def __next__(self): - return str() - - def __iter__(self): - yield str() - - def readlines(self): - return [''] - - def __enter__(self): - return self diff --git a/jedi/evaluate/compiled/fake/operator.pym b/jedi/evaluate/compiled/fake/operator.pym deleted file mode 100644 index d40d4681..00000000 --- a/jedi/evaluate/compiled/fake/operator.pym +++ /dev/null @@ -1,33 +0,0 @@ -# Just copied this code from Python 3.6. - -class itemgetter: - """ - Return a callable object that fetches the given item(s) from its operand. - After f = itemgetter(2), the call f(r) returns r[2]. - After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) - """ - __slots__ = ('_items', '_call') - - def __init__(self, item, *items): - if not items: - self._items = (item,) - def func(obj): - return obj[item] - self._call = func - else: - self._items = items = (item,) + items - def func(obj): - return tuple(obj[i] for i in items) - self._call = func - - def __call__(self, obj): - return self._call(obj) - - def __repr__(self): - return '%s.%s(%s)' % (self.__class__.__module__, - self.__class__.__name__, - ', '.join(map(repr, self._items))) - - def __reduce__(self): - return self.__class__, self._items - diff --git a/jedi/evaluate/compiled/fake/posix.pym b/jedi/evaluate/compiled/fake/posix.pym deleted file mode 100644 index 4417f7cb..00000000 --- a/jedi/evaluate/compiled/fake/posix.pym +++ /dev/null @@ -1,5 +0,0 @@ -def getcwd(): - return '' - -def getcwdu(): - return '' diff --git a/jedi/evaluate/compiled/mixed.py b/jedi/evaluate/compiled/mixed.py index 773b5292..3a2aea7e 100644 --- a/jedi/evaluate/compiled/mixed.py +++ b/jedi/evaluate/compiled/mixed.py @@ -14,6 +14,7 @@ from jedi.evaluate import imports from jedi.evaluate.base_context import Context, ContextSet from jedi.evaluate.context import ModuleContext from jedi.evaluate.cache import evaluator_function_cache +from jedi.evaluate.helpers import execute_evaluated from jedi.evaluate.compiled.getattr_static import getattr_static from jedi.evaluate.compiled.access import compiled_objects_cache from jedi.evaluate.compiled.context import create_cached_compiled_object @@ -81,9 +82,9 @@ class MixedName(compiled.CompiledName): access_handle = self.parent_context.access_handle # TODO use logic from compiled.CompiledObjectFilter access_handle = access_handle.getattr(self.string_name, default=None) - return ContextSet( + return ContextSet([ 
_create(self._evaluator, access_handle, parent_context=self.parent_context) - ) + ]) @property def api_type(self): @@ -93,21 +94,13 @@ class MixedName(compiled.CompiledName): class MixedObjectFilter(compiled.CompiledObjectFilter): name_class = MixedName - def __init__(self, evaluator, mixed_object, is_instance=False): - super(MixedObjectFilter, self).__init__( - evaluator, mixed_object, is_instance) - self._mixed_object = mixed_object - - #def _create(self, name): - #return MixedName(self._evaluator, self._compiled_object, name) - @evaluator_function_cache() def _load_module(evaluator, path): - module_node = evaluator.grammar.parse( + module_node = evaluator.parse( path=path, cache=True, - diff_cache=True, + diff_cache=settings.fast_parser, cache_path=settings.cache_directory ).get_root_node() # python_module = inspect.getmodule(python_object) @@ -210,15 +203,18 @@ def _create(evaluator, access_handle, parent_context, *args): if parent_context.tree_node.get_root_node() == module_node: module_context = parent_context.get_root_context() else: + # TODO this __name__ is probably wrong. + name = compiled_object.get_root_context().py__name__() + string_names = tuple(name.split('.')) module_context = ModuleContext( evaluator, module_node, path=path, + string_names=string_names, code_lines=code_lines, + is_package=hasattr(compiled_object, 'py__path__'), ) - # TODO this __name__ is probably wrong. - name = compiled_object.get_root_context().py__name__() if name is not None: - imports.add_module_to_cache(evaluator, name, module_context) + evaluator.module_cache.add(string_names, ContextSet([module_context])) tree_context = module_context.create_context( tree_node, @@ -228,7 +224,7 @@ def _create(evaluator, access_handle, parent_context, *args): if tree_node.type == 'classdef': if not access_handle.is_class(): # Is an instance, not a class. - tree_context, = tree_context.execute_evaluated() + tree_context, = execute_evaluated(tree_context) return MixedObject( evaluator, diff --git a/jedi/evaluate/compiled/subprocess/__init__.py b/jedi/evaluate/compiled/subprocess/__init__.py index e088560d..f8dc8f20 100644 --- a/jedi/evaluate/compiled/subprocess/__init__.py +++ b/jedi/evaluate/compiled/subprocess/__init__.py @@ -15,27 +15,41 @@ import errno import weakref import traceback from functools import partial +from threading import Thread +try: + from queue import Queue, Empty +except ImportError: + from Queue import Queue, Empty # python 2.7 from jedi._compatibility import queue, is_py3, force_unicode, \ - pickle_dump, pickle_load, highest_pickle_protocol, GeneralizedPopen + pickle_dump, pickle_load, GeneralizedPopen +from jedi import debug from jedi.cache import memoize_method from jedi.evaluate.compiled.subprocess import functions from jedi.evaluate.compiled.access import DirectObjectAccess, AccessPath, \ SignatureParam from jedi.api.exceptions import InternalError -_subprocesses = {} _MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py') -def get_subprocess(executable, version): - try: - return _subprocesses[executable] - except KeyError: - sub = _subprocesses[executable] = _CompiledSubprocess(executable, - version) - return sub +def _enqueue_output(out, queue): + for line in iter(out.readline, b''): + queue.put(line) + out.close() + + +def _add_stderr_to_debug(stderr_queue): + while True: + # Try to do some error reporting from the subprocess and print its + # stderr contents. 
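The surrounding changes wire subprocess stderr through a queue instead of muting it: a daemon thread blocks on readline() in _enqueue_output() and enqueues each line, and _add_stderr_to_debug() drains the queue with get_nowait() so the main thread never blocks. The same pattern in isolation (a sketch, not Jedi code):

    import subprocess
    from threading import Thread
    try:
        from queue import Queue, Empty
    except ImportError:
        from Queue import Queue, Empty  # Python 2

    def _pump(stream, q):
        # Blocking readline() is fine here; this runs in a daemon thread.
        for line in iter(stream.readline, b''):
            q.put(line)
        stream.close()

    proc = subprocess.Popen(
        ['python', '-c', 'import sys; sys.stderr.write("boom\\n")'],
        stderr=subprocess.PIPE,
    )
    q = Queue()
    t = Thread(target=_pump, args=(proc.stderr, q))
    t.daemon = True
    t.start()
    proc.wait()
    t.join(timeout=1)
    while True:  # drain whatever arrived, without blocking
        try:
            print(q.get_nowait().decode('utf-8', 'replace').rstrip())
        except Empty:
            break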
+ try: + line = stderr_queue.get_nowait() + line = line.decode('utf-8', 'replace') + debug.warning('stderr output: %s' % line.rstrip('\n')) + except Empty: + break def _get_function(name): @@ -119,30 +133,40 @@ class EvaluatorSubprocess(_EvaluatorProcess): return obj def __del__(self): - if self._used: + if self._used and not self._compiled_subprocess.is_crashed: self._compiled_subprocess.delete_evaluator(self._evaluator_id) -class _CompiledSubprocess(object): - _crashed = False +class CompiledSubprocess(object): + is_crashed = False + # Start with 2, gets set after _get_info. + _pickle_protocol = 2 - def __init__(self, executable, version): + def __init__(self, executable): self._executable = executable self._evaluator_deletion_queue = queue.deque() - self._pickle_protocol = highest_pickle_protocol([sys.version_info, - version]) - @property + def __repr__(self): + pid = os.getpid() + return '<%s _executable=%r, _pickle_protocol=%r, is_crashed=%r, pid=%r>' % ( + self.__class__.__name__, + self._executable, + self._pickle_protocol, + self.is_crashed, + pid, + ) + @memoize_method - def _process(self): + def _get_process(self): + debug.dbg('Start environment subprocess %s', self._executable) parso_path = sys.modules['parso'].__file__ args = ( self._executable, _MAIN_PATH, os.path.dirname(os.path.dirname(parso_path)), - str(self._pickle_protocol) + '.'.join(str(x) for x in sys.version_info[:3]), ) - return GeneralizedPopen( + process = GeneralizedPopen( args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, @@ -151,6 +175,14 @@ class _CompiledSubprocess(object): # (this is already the case on Python 3). bufsize=-1 ) + self._stderr_queue = Queue() + self._stderr_thread = t = Thread( + target=_enqueue_output, + args=(process.stderr, self._stderr_queue) + ) + t.daemon = True + t.start() + return process def run(self, evaluator, function, args=(), kwargs={}): # Delete old evaluators. @@ -168,24 +200,23 @@ class _CompiledSubprocess(object): def get_sys_path(self): return self._send(None, functions.get_sys_path, (), {}) - def kill(self): - self._crashed = True + def _kill(self): + self.is_crashed = True try: - subprocess = _subprocesses[self._executable] - except KeyError: - # Fine it was already removed from the cache. + self._get_process().kill() + self._get_process().wait() + except (AttributeError, TypeError): + # If the Python process is terminating, it will remove some modules + # earlier than others and in general it's unclear how to deal with + # that so we just ignore the exceptions here. pass - else: - # In the `!=` case there is already a new subprocess in place - # and we don't need to do anything here anymore. - if subprocess == self: - del _subprocesses[self._executable] - self._process.kill() - self._process.wait() + def __del__(self): + if not self.is_crashed: + self._kill() def _send(self, evaluator_id, function, args=(), kwargs={}): - if self._crashed: + if self.is_crashed: raise InternalError("The subprocess %s has crashed." % self._executable) if not is_py3: @@ -194,7 +225,7 @@ class _CompiledSubprocess(object): data = evaluator_id, function, args, kwargs try: - pickle_dump(data, self._process.stdin, self._pickle_protocol) + pickle_dump(data, self._get_process().stdin, self._pickle_protocol) except (socket.error, IOError) as e: # Once Python2 will be removed we can just use `BrokenPipeError`. 
            # Also, somehow in windows it returns EINVAL instead of EPIPE if
@@ -202,18 +233,19 @@
             # the pipe is broken.
             if e.errno not in (errno.EPIPE, errno.EINVAL):
                 # Not a broken pipe
                 raise
-            self.kill()
+            self._kill()
             raise InternalError("The subprocess %s was killed. Maybe out of memory?" % self._executable)

         try:
-            is_exception, traceback, result = pickle_load(self._process.stdout)
+            is_exception, traceback, result = pickle_load(self._get_process().stdout)
         except EOFError as eof_error:
             try:
-                stderr = self._process.stderr.read()
+                stderr = self._get_process().stderr.read().decode('utf-8', 'replace')
             except Exception as exc:
                 stderr = '<empty/not available (%r)>' % exc
-            self.kill()
+            self._kill()
+            _add_stderr_to_debug(self._stderr_queue)
             raise InternalError(
                 "The subprocess %s has crashed (%r, stderr=%s)." % (
                     self._executable,
@@ -221,6 +253,8 @@
                     eof_error,
                     stderr,
                 ))

+        _add_stderr_to_debug(self._stderr_queue)
+
         if is_exception:
             # Replace the attribute error message with the traceback. It's
             # way more informative.
@@ -284,11 +318,9 @@ class Listener(object):
     def listen(self):
         stdout = sys.stdout
-        # Mute stdout/stderr. Nobody should actually be able to write to those,
-        # because stdout is used for IPC and stderr will just be annoying if it
-        # leaks (on module imports).
+        # Mute stdout. Nobody should actually be able to write to it,
+        # because stdout is used for IPC.
         sys.stdout = open(os.devnull, 'w')
-        sys.stderr = open(os.devnull, 'w')
         stdin = sys.stdin
         if sys.version_info[0] > 2:
             stdout = stdout.buffer
@@ -304,9 +336,9 @@ class Listener(object):
         while True:
             try:
                 payload = pickle_load(stdin)
             except EOFError:
-                # It looks like the parent process closed. Don't make a big fuss
-                # here and just exit.
-                exit(1)
+                # It looks like the parent process closed.
+                # Don't make a big fuss here and just exit.
+                exit(0)
             try:
                 result = False, None, self._run(*payload)
             except Exception as e:
diff --git a/jedi/evaluate/compiled/subprocess/__main__.py b/jedi/evaluate/compiled/subprocess/__main__.py
index ff7462fa..4be28204 100644
--- a/jedi/evaluate/compiled/subprocess/__main__.py
+++ b/jedi/evaluate/compiled/subprocess/__main__.py
@@ -1,5 +1,5 @@
-import sys
 import os
+import sys


 def _get_paths():
@@ -45,7 +45,11 @@ else:
     load('jedi')
 from jedi.evaluate.compiled import subprocess  # NOQA
+from jedi._compatibility import highest_pickle_protocol  # noqa: E402
+
+
 # Retrieve the pickle protocol.
-pickle_protocol = int(sys.argv[2])
+host_sys_version = [int(x) for x in sys.argv[2].split('.')]
+pickle_protocol = highest_pickle_protocol([sys.version_info, host_sys_version])

 # And finally start the client.
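The __main__.py change reverses who picks the pickle protocol: the host used to compute it and pass an integer, now it passes its own version string and the child settles on the highest protocol both interpreters share. The arithmetic, sketched under the assumption that the helper simply caps the protocol per interpreter (Python 2.7 tops out at protocol 2, 3.0-3.3 at 3, 3.4+ at 4):

    import sys

    def shared_pickle_protocol(version_infos):
        def cap(v):
            if v[0] == 2:
                return 2
            if v[0] == 3 and v[1] <= 3:
                return 3
            return 4
        return min(cap(v) for v in version_infos)

    host = [int(x) for x in '3.6.8'.split('.')]  # what arrives in sys.argv[2]
    print(shared_pickle_protocol([sys.version_info, host]))  # 2 on a 2.7 child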
-subprocess.Listener(pickle_protocol).listen() +subprocess.Listener(pickle_protocol=pickle_protocol).listen() diff --git a/jedi/evaluate/compiled/subprocess/functions.py b/jedi/evaluate/compiled/subprocess/functions.py index c0fc6d13..b3fdac04 100644 --- a/jedi/evaluate/compiled/subprocess/functions.py +++ b/jedi/evaluate/compiled/subprocess/functions.py @@ -1,8 +1,9 @@ +from __future__ import print_function import sys import os from jedi._compatibility import find_module, cast_path, force_unicode, \ - iter_modules, all_suffixes, print_to_stderr + iter_modules, all_suffixes from jedi.evaluate.compiled import access from jedi import parser_utils @@ -20,52 +21,24 @@ def get_compiled_method_return(evaluator, id, attribute, *args, **kwargs): return getattr(handle.access, attribute)(*args, **kwargs) -def get_special_object(evaluator, identifier): - return access.get_special_object(evaluator, identifier) - - def create_simple_object(evaluator, obj): return access.create_access_path(evaluator, obj) def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs): + """ + Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]] + """ if sys_path is not None: sys.path, temp = sys_path, sys.path try: - module_file, module_path, is_pkg = find_module(full_name=full_name, **kwargs) + return find_module(full_name=full_name, **kwargs) except ImportError: - return None, None, None + return None, None finally: if sys_path is not None: sys.path = temp - code = None - if is_pkg: - # In this case, we don't have a file yet. Search for the - # __init__ file. - if module_path.endswith(('.zip', '.egg')): - code = module_file.loader.get_source(full_name) - else: - module_path = _get_init_path(module_path) - elif module_file: - if module_path.endswith(('.zip', '.egg')): - # Unfortunately we are reading unicode here already, not byes. - # It seems however hard to get bytes, because the zip importer - # logic just unpacks the zip file and returns a file descriptor - # that we cannot as easily access. Therefore we just read it as - # a string. - code = module_file.read() - else: - # Read the code with a binary file, because the binary file - # might not be proper unicode. This is handled by the parser - # wrapper. - with open(module_path, 'rb') as f: - code = f.read() - - module_file.close() - - return code, cast_path(module_path), is_pkg - def list_module_names(evaluator, search_path): return [ @@ -90,7 +63,7 @@ def _test_print(evaluator, stderr=None, stdout=None): Force some prints in the subprocesses. This exists for unit tests. 
""" if stderr is not None: - print_to_stderr(stderr) + print(stderr, file=sys.stderr) sys.stderr.flush() if stdout is not None: print(stdout) diff --git a/jedi/evaluate/context/__init__.py b/jedi/evaluate/context/__init__.py index 4e7ce4d6..56f6495b 100644 --- a/jedi/evaluate/context/__init__.py +++ b/jedi/evaluate/context/__init__.py @@ -1,5 +1,6 @@ from jedi.evaluate.context.module import ModuleContext from jedi.evaluate.context.klass import ClassContext -from jedi.evaluate.context.function import FunctionContext, FunctionExecutionContext +from jedi.evaluate.context.function import FunctionContext, \ + MethodContext, FunctionExecutionContext from jedi.evaluate.context.instance import AnonymousInstance, BoundMethod, \ CompiledInstance, AbstractInstanceContext, TreeInstance diff --git a/jedi/evaluate/context/asynchronous.py b/jedi/evaluate/context/asynchronous.py deleted file mode 100644 index 51e59a48..00000000 --- a/jedi/evaluate/context/asynchronous.py +++ /dev/null @@ -1,38 +0,0 @@ -from jedi.evaluate.filters import publish_method, BuiltinOverwrite -from jedi.evaluate.base_context import ContextSet - - -class AsyncBase(BuiltinOverwrite): - def __init__(self, evaluator, func_execution_context): - super(AsyncBase, self).__init__(evaluator) - self.func_execution_context = func_execution_context - - @property - def name(self): - return self.get_object().name - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.func_execution_context) - - -class Coroutine(AsyncBase): - special_object_identifier = u'COROUTINE' - - @publish_method('__await__') - def _await(self): - return ContextSet(CoroutineWrapper(self.evaluator, self.func_execution_context)) - - -class CoroutineWrapper(AsyncBase): - special_object_identifier = u'COROUTINE_WRAPPER' - - def py__stop_iteration_returns(self): - return self.func_execution_context.get_return_values() - - -class AsyncGenerator(AsyncBase): - """Handling of `yield` functions.""" - special_object_identifier = u'ASYNC_GENERATOR' - - def py__aiter__(self): - return self.func_execution_context.get_yield_lazy_contexts(is_async=True) diff --git a/jedi/evaluate/context/function.py b/jedi/evaluate/context/function.py index 2bb3a9b8..c48ab5fc 100644 --- a/jedi/evaluate/context/function.py +++ b/jedi/evaluate/context/function.py @@ -6,20 +6,20 @@ from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass from jedi.evaluate import compiled from jedi.evaluate import recursion from jedi.evaluate import docstrings -from jedi.evaluate import pep0484 from jedi.evaluate import flow_analysis from jedi.evaluate import helpers +from jedi.evaluate.signature import TreeSignature from jedi.evaluate.arguments import AnonymousArguments -from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \ - ContextName, AbstractNameDefinition, ParamName +from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter +from jedi.evaluate.names import ContextName, AbstractNameDefinition, ParamName from jedi.evaluate.base_context import ContextualizedNode, NO_CONTEXTS, \ - ContextSet, TreeContext + ContextSet, TreeContext, ContextWrapper from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \ LazyTreeContext from jedi.evaluate.context import iterable -from jedi.evaluate.context import asynchronous from jedi import parser_utils from jedi.evaluate.parser_cache import get_yield_exprs +from jedi.evaluate.helpers import contexts_from_qualified_names class LambdaName(AbstractNameDefinition): @@ -35,21 +35,30 @@ class 
LambdaName(AbstractNameDefinition): return self._lambda_context.tree_node.start_pos def infer(self): - return ContextSet(self._lambda_context) + return ContextSet([self._lambda_context]) -class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)): - """ - Needed because of decorators. Decorators are evaluated here. - """ +class FunctionAndClassMixin(object): + def get_qualified_names(self): + if self.parent_context.is_class(): + n = self.parent_context.get_qualified_names() + if n is None: + # This means that the parent class lives within a function. + return None + return n + [self.py__name__()] + elif self.parent_context.is_module(): + return [self.py__name__()] + else: + return None + + def py__name__(self): + return self.name.string_name + + +class FunctionMixin(FunctionAndClassMixin): api_type = u'function' - def __init__(self, evaluator, parent_context, funcdef): - """ This should not be called directly """ - super(FunctionContext, self).__init__(evaluator, parent_context) - self.tree_node = funcdef - - def get_filters(self, search_global, until_position=None, origin_scope=None): + def get_filters(self, search_global=False, until_position=None, origin_scope=None): if search_global: yield ParserTreeFilter( self.evaluator, @@ -62,46 +71,17 @@ class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)): for filter in scope.get_filters(search_global=False, origin_scope=origin_scope): yield filter - def infer_function_execution(self, function_execution): - """ - Created to be used by inheritance. - """ - is_coroutine = self.tree_node.parent.type == 'async_stmt' - is_generator = bool(get_yield_exprs(self.evaluator, self.tree_node)) + def py__get__(self, instance, class_context): + from jedi.evaluate.context.instance import BoundMethod + if instance is None: + # Calling the Foo.bar results in the original bar function. + return ContextSet([self]) + return ContextSet([BoundMethod(instance, self)]) - if is_coroutine: - if is_generator: - if self.evaluator.environment.version_info < (3, 6): - return NO_CONTEXTS - return ContextSet(asynchronous.AsyncGenerator(self.evaluator, function_execution)) - else: - if self.evaluator.environment.version_info < (3, 5): - return NO_CONTEXTS - return ContextSet(asynchronous.Coroutine(self.evaluator, function_execution)) - else: - if is_generator: - return ContextSet(iterable.Generator(self.evaluator, function_execution)) - else: - return function_execution.get_return_values() - - def get_function_execution(self, arguments=None): - if arguments is None: - arguments = AnonymousArguments() - - return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments) - - def py__call__(self, arguments): - function_execution = self.get_function_execution(arguments) - return self.infer_function_execution(function_execution) - - def py__class__(self): - # This differentiation is only necessary for Python2. Python3 does not - # use a different method class. 
- if isinstance(parser_utils.get_parent_scope(self.tree_node), tree.Class): - name = u'METHOD_CLASS' - else: - name = u'FUNCTION_CLASS' - return compiled.get_special_object(self.evaluator, name) + def get_param_names(self): + function_execution = self.get_function_execution() + return [ParamName(function_execution, param.name) + for param in self.tree_node.get_params()] @property def name(self): @@ -109,10 +89,82 @@ class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)): return LambdaName(self) return ContextName(self, self.tree_node.name) - def get_param_names(self): - function_execution = self.get_function_execution() - return [ParamName(function_execution, param.name) - for param in self.tree_node.get_params()] + def py__call__(self, arguments): + function_execution = self.get_function_execution(arguments) + return function_execution.infer() + + def get_function_execution(self, arguments=None): + if arguments is None: + arguments = AnonymousArguments() + + return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments) + + +class FunctionContext(use_metaclass(CachedMetaClass, FunctionMixin, TreeContext)): + """ + Needed because of decorators. Decorators are evaluated here. + """ + def is_function(self): + return True + + @classmethod + def from_context(cls, context, tree_node): + def create(tree_node): + if context.is_class(): + return MethodContext( + context.evaluator, + context, + parent_context=parent_context, + tree_node=tree_node + ) + else: + return cls( + context.evaluator, + parent_context=parent_context, + tree_node=tree_node + ) + + overloaded_funcs = list(_find_overload_functions(context, tree_node)) + + parent_context = context + while parent_context.is_class() or parent_context.is_instance(): + parent_context = parent_context.parent_context + + function = create(tree_node) + + if overloaded_funcs: + return OverloadedFunctionContext( + function, + [create(f) for f in overloaded_funcs] + ) + return function + + def py__class__(self): + c, = contexts_from_qualified_names(self.evaluator, u'types', u'FunctionType') + return c + + def get_default_param_context(self): + return self.parent_context + + def get_signatures(self): + return [TreeSignature(self)] + + +class MethodContext(FunctionContext): + def __init__(self, evaluator, class_context, *args, **kwargs): + super(MethodContext, self).__init__(evaluator, *args, **kwargs) + self.class_context = class_context + + def get_default_param_context(self): + return self.class_context + + def get_qualified_names(self): + # Need to implement this, because the parent context of a method + # context is not the class context but the module. 
+ names = self.class_context.get_qualified_names() + if names is None: + return None + return names + [self.py__name__()] class FunctionExecutionContext(TreeContext): @@ -127,9 +179,12 @@ class FunctionExecutionContext(TreeContext): function_execution_filter = FunctionExecutionFilter def __init__(self, evaluator, parent_context, function_context, var_args): - super(FunctionExecutionContext, self).__init__(evaluator, parent_context) + super(FunctionExecutionContext, self).__init__( + evaluator, + parent_context, + function_context.tree_node, + ) self.function_context = function_context - self.tree_node = function_context.tree_node self.var_args = var_args @evaluator_method_cache(default=NO_CONTEXTS) @@ -144,8 +199,13 @@ class FunctionExecutionContext(TreeContext): returns = get_yield_exprs(self.evaluator, funcdef) else: returns = funcdef.iter_return_stmts() - context_set = docstrings.infer_return_types(self.function_context) - context_set |= pep0484.infer_return_types(self.function_context) + from jedi.evaluate.gradual.annotation import infer_return_types + context_set = infer_return_types(self) + if context_set: + # If there are annotations, prefer them over anything else. + # This will make it faster. + return context_set + context_set |= docstrings.infer_return_types(self.function_context) for r in returns: check = flow_analysis.reachability_check(self, funcdef, r) @@ -162,7 +222,7 @@ class FunctionExecutionContext(TreeContext): children = r.children except AttributeError: ctx = compiled.builtin_from_name(self.evaluator, u'None') - context_set |= ContextSet(ctx) + context_set |= ContextSet([ctx]) else: context_set |= self.eval_node(children[1]) if check is flow_analysis.REACHABLE: @@ -234,11 +294,178 @@ class FunctionExecutionContext(TreeContext): for result in self._get_yield_lazy_context(yield_in_same_for_stmt): yield result - def get_filters(self, search_global, until_position=None, origin_scope=None): + def merge_yield_contexts(self, is_async=False): + return ContextSet.from_sets( + lazy_context.infer() + for lazy_context in self.get_yield_lazy_contexts() + ) + + def get_filters(self, search_global=False, until_position=None, origin_scope=None): yield self.function_execution_filter(self.evaluator, self, until_position=until_position, origin_scope=origin_scope) @evaluator_method_cache() - def get_params(self): - return self.var_args.get_params(self) + def get_executed_params_and_issues(self): + return self.var_args.get_executed_params_and_issues(self) + + def matches_signature(self): + executed_params, issues = self.get_executed_params_and_issues() + if issues: + return False + + matches = all(executed_param.matches_signature() + for executed_param in executed_params) + if debug.enable_notice: + signature = parser_utils.get_call_signature(self.tree_node) + if matches: + debug.dbg("Overloading match: %s@%s (%s)", + signature, self.tree_node.start_pos[0], self.var_args, color='BLUE') + else: + debug.dbg("Overloading no match: %s@%s (%s)", + signature, self.tree_node.start_pos[0], self.var_args, color='BLUE') + return matches + + def infer(self): + """ + Created to be used by inheritance. 
+ """ + evaluator = self.evaluator + is_coroutine = self.tree_node.parent.type == 'async_stmt' + is_generator = bool(get_yield_exprs(evaluator, self.tree_node)) + from jedi.evaluate.gradual.typing import AnnotatedSubClass + + if is_coroutine: + if is_generator: + if evaluator.environment.version_info < (3, 6): + return NO_CONTEXTS + async_generator_classes = evaluator.typing_module \ + .py__getattribute__('AsyncGenerator') + + yield_contexts = self.merge_yield_contexts(is_async=True) + # The contravariant doesn't seem to be defined. + generics = (yield_contexts.py__class__(), NO_CONTEXTS) + return ContextSet( + # In Python 3.6 AsyncGenerator is still a class. + AnnotatedSubClass(c, generics) + for c in async_generator_classes + ).execute_annotation() + else: + if evaluator.environment.version_info < (3, 5): + return NO_CONTEXTS + async_classes = evaluator.typing_module.py__getattribute__('Coroutine') + return_contexts = self.get_return_values() + # Only the first generic is relevant. + generics = (return_contexts.py__class__(), NO_CONTEXTS, NO_CONTEXTS) + return ContextSet( + AnnotatedSubClass(c, generics) for c in async_classes + ).execute_annotation() + else: + if is_generator: + return ContextSet([iterable.Generator(evaluator, self)]) + else: + return self.get_return_values() + + +class OverloadedFunctionContext(FunctionMixin, ContextWrapper): + def __init__(self, function, overloaded_functions): + super(OverloadedFunctionContext, self).__init__(function) + self.overloaded_functions = overloaded_functions + + def py__call__(self, arguments): + debug.dbg("Execute overloaded function %s", self._wrapped_context, color='BLUE') + function_executions = [] + context_set = NO_CONTEXTS + matched = False + for f in self.overloaded_functions: + function_execution = f.get_function_execution(arguments) + function_executions.append(function_execution) + if function_execution.matches_signature(): + matched = True + return function_execution.infer() + + if matched: + return context_set + + if self.evaluator.is_analysis: + # In this case we want precision. + return NO_CONTEXTS + return ContextSet.from_sets(fe.infer() for fe in function_executions) + + def get_signatures(self): + return [TreeSignature(f) for f in self.overloaded_functions] + + +def signature_matches(function_context, arguments): + unpacked_arguments = arguments.unpack() + key_args = {} + for param_node in function_context.tree_node.get_params(): + while True: + key, argument = next(unpacked_arguments, (None, None)) + if key is None or argument is None: + break + key_args[key] = argument + if argument is None: + argument = key_args.pop(param_node.name.value, None) + if argument is None: + # This signature has an parameter more than arguments were given. 
+ return bool(param_node.star_count == 1) + + if param_node.annotation is not None: + if param_node.star_count == 2: + return False # TODO allow this + + annotation_contexts = function_context.evaluator.eval_element( + function_context.get_default_param_context(), + param_node.annotation + ) + argument_contexts = argument.infer().py__class__() + if not any(c1.is_sub_class_of(c2) + for c1 in argument_contexts + for c2 in annotation_contexts): + return False + return True + + +def _find_overload_functions(context, tree_node): + def _is_overload_decorated(funcdef): + if funcdef.parent.type == 'decorated': + decorators = funcdef.parent.children[0] + if decorators.type == 'decorator': + decorators = [decorators] + else: + decorators = decorators.children + for decorator in decorators: + dotted_name = decorator.children[1] + if dotted_name.type == 'name' and dotted_name.value == 'overload': + # TODO check with contexts if it's the right overload + return True + return False + + if tree_node.type == 'lambdef': + return + + if _is_overload_decorated(tree_node): + yield tree_node + + while True: + filter = ParserTreeFilter( + context.evaluator, + context, + until_position=tree_node.start_pos + ) + names = filter.get(tree_node.name.value) + assert isinstance(names, list) + if not names: + break + + found = False + for name in names: + funcdef = name.tree_name.parent + if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef): + tree_node = funcdef + found = True + yield funcdef + + if not found: + break diff --git a/jedi/evaluate/context/instance.py b/jedi/evaluate/context/instance.py index def5e19a..0621e4c5 100644 --- a/jedi/evaluate/context/instance.py +++ b/jedi/evaluate/context/instance.py @@ -1,41 +1,60 @@ from abc import abstractproperty from jedi import debug +from jedi import settings from jedi.evaluate import compiled -from jedi.evaluate import filters +from jedi.evaluate.helpers import contexts_from_qualified_names +from jedi.evaluate.filters import AbstractFilter +from jedi.evaluate.names import ContextName, TreeNameDefinition from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \ - iterator_to_context_set + iterator_to_context_set, ContextWrapper from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts from jedi.evaluate.cache import evaluator_method_cache -from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments -from jedi.cache import memoize_method -from jedi.evaluate.context.function import FunctionExecutionContext, FunctionContext -from jedi.evaluate.context.klass import ClassContext, apply_py__get__ +from jedi.evaluate.arguments import AnonymousArguments, \ + ValuesArguments, TreeArgumentsWrapper +from jedi.evaluate.context.function import FunctionExecutionContext, \ + FunctionContext, FunctionMixin, OverloadedFunctionContext +from jedi.evaluate.context.klass import ClassContext, apply_py__get__, \ + ClassFilter from jedi.evaluate.context import iterable from jedi.parser_utils import get_parent_scope -class BaseInstanceFunctionExecution(FunctionExecutionContext): - def __init__(self, instance, *args, **kwargs): - self.instance = instance - super(BaseInstanceFunctionExecution, self).__init__( - instance.evaluator, *args, **kwargs) +class InstanceExecutedParam(object): + def __init__(self, instance, tree_param): + self._instance = instance + self._tree_param = tree_param + self.string_name = self._tree_param.name.value + + def infer(self): + return ContextSet([self._instance]) + + def matches_signature(self): + return 
True -class InstanceFunctionExecution(BaseInstanceFunctionExecution): - def __init__(self, instance, parent_context, function_context, var_args): - var_args = InstanceVarArgs(self, var_args) +class AnonymousInstanceArguments(AnonymousArguments): + def __init__(self, instance): + self._instance = instance - super(InstanceFunctionExecution, self).__init__( - instance, parent_context, function_context, var_args) + def get_executed_params_and_issues(self, execution_context): + from jedi.evaluate.dynamic import search_params + tree_params = execution_context.tree_node.get_params() + if not tree_params: + return [], [] - -class AnonymousInstanceFunctionExecution(BaseInstanceFunctionExecution): - function_execution_filter = filters.AnonymousInstanceFunctionExecutionFilter - - def __init__(self, instance, parent_context, function_context, var_args): - super(AnonymousInstanceFunctionExecution, self).__init__( - instance, parent_context, function_context, var_args) + self_param = InstanceExecutedParam(self._instance, tree_params[0]) + if len(tree_params) == 1: + # If the only param is self, we don't need to try to find + # executions of this function, we have all the params already. + return [self_param], [] + executed_params = list(search_params( + execution_context.evaluator, + execution_context, + execution_context.tree_node + )) + executed_params[0] = self_param + return executed_params, [] class AbstractInstanceContext(Context): @@ -43,7 +62,6 @@ class AbstractInstanceContext(Context): This class is used to evaluate instances. """ api_type = u'instance' - function_execution_cls = InstanceFunctionExecution def __init__(self, evaluator, parent_context, class_context, var_args): super(AbstractInstanceContext, self).__init__(evaluator, parent_context) @@ -52,8 +70,11 @@ class AbstractInstanceContext(Context): self.class_context = class_context self.var_args = var_args - def is_class(self): - return False + def is_instance(self): + return True + + def get_annotated_class_object(self): + return self.class_context # This is the default. @property def py__call__(self): @@ -63,7 +84,7 @@ class AbstractInstanceContext(Context): raise AttributeError def execute(arguments): - return ContextSet.from_sets(name.execute(arguments) for name in names) + return ContextSet.from_sets(name.infer().execute(arguments) for name in names) return execute @@ -86,27 +107,29 @@ class AbstractInstanceContext(Context): def execute_function_slots(self, names, *evaluated_args): return ContextSet.from_sets( - name.execute_evaluated(*evaluated_args) + name.infer().execute_evaluated(*evaluated_args) for name in names ) - def py__get__(self, obj): + def py__get__(self, obj, class_context): + """ + obj may be None. + """ # Arguments in __get__ descriptors are obj, class. # `method` is the new parent of the array, don't know if that's good. 
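py__get__ here mirrors the runtime descriptor protocol: when attribute lookup finds an object with __get__, Python calls it with the instance (or None for class-level access) and the owning class, which is exactly why the signature above takes obj and class_context and documents that obj may be None. A plain-Python refresher:

    class Ten(object):
        def __get__(self, obj, objtype=None):
            # obj is None when accessed on the class itself.
            if obj is None:
                return self
            return 10

    class A(object):
        x = Ten()

    assert A().x == 10           # instance access: obj is the instance
    assert isinstance(A.x, Ten)  # class access: obj is None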
names = self.get_function_slot_names(u'__get__') if names: - if isinstance(obj, AbstractInstanceContext): - return self.execute_function_slots(names, obj, obj.class_context) - else: - none_obj = compiled.builtin_from_name(self.evaluator, u'None') - return self.execute_function_slots(names, none_obj, obj) + if obj is None: + obj = compiled.builtin_from_name(self.evaluator, u'None') + return self.execute_function_slots(names, obj, class_context) else: - return ContextSet(self) + return ContextSet([self]) def get_filters(self, search_global=None, until_position=None, origin_scope=None, include_self_names=True): + class_context = self.get_annotated_class_object() if include_self_names: - for cls in self.class_context.py__mro__(): + for cls in class_context.py__mro__(): if not isinstance(cls, compiled.CompiledObject) \ or cls.tree_node is not None: # In this case we're excluding compiled objects that are @@ -114,65 +137,60 @@ class AbstractInstanceContext(Context): # compiled objects to search for self variables. yield SelfAttributeFilter(self.evaluator, self, cls, origin_scope) - for cls in self.class_context.py__mro__(): + for cls in class_context.py__mro__(): if isinstance(cls, compiled.CompiledObject): yield CompiledInstanceClassFilter(self.evaluator, self, cls) else: yield InstanceClassFilter(self.evaluator, self, cls, origin_scope) - def py__getitem__(self, index): - try: - names = self.get_function_slot_names(u'__getitem__') - except KeyError: - debug.warning('No __getitem__, cannot access the array.') + def py__getitem__(self, index_context_set, contextualized_node): + names = self.get_function_slot_names(u'__getitem__') + if not names: + debug.warning('Found no __getitem__ on %s', self) return NO_CONTEXTS - else: - index_obj = compiled.create_simple_object(self.evaluator, index) - return self.execute_function_slots(names, index_obj) - def py__iter__(self): + args = ValuesArguments([index_context_set]) + return ContextSet.from_sets(name.infer().execute(args) for name in names) + + def py__iter__(self, contextualized_node=None): iter_slot_names = self.get_function_slot_names(u'__iter__') if not iter_slot_names: - debug.warning('No __iter__ on %s.' % self) - return + return super(AbstractInstanceContext, self).py__iter__(contextualized_node) - for generator in self.execute_function_slots(iter_slot_names): - if isinstance(generator, AbstractInstanceContext): - # `__next__` logic. - if self.evaluator.environment.version_info.major == 2: - name = u'next' + def iterate(): + for generator in self.execute_function_slots(iter_slot_names): + if generator.is_instance(): + # `__next__` logic. 
+ if self.evaluator.environment.version_info.major == 2: + name = u'next' + else: + name = u'__next__' + next_slot_names = generator.get_function_slot_names(name) + if next_slot_names: + yield LazyKnownContexts( + generator.execute_function_slots(next_slot_names) + ) + else: + debug.warning('Instance has no __next__ function in %s.', generator) else: - name = u'__next__' - iter_slot_names = generator.get_function_slot_names(name) - if iter_slot_names: - yield LazyKnownContexts( - generator.execute_function_slots(iter_slot_names) - ) - else: - debug.warning('Instance has no __next__ function in %s.', generator) - else: - for lazy_context in generator.py__iter__(): - yield lazy_context + for lazy_context in generator.py__iter__(): + yield lazy_context + return iterate() @abstractproperty def name(self): pass - def _create_init_execution(self, class_context, func_node): - bound_method = BoundMethod( - self.evaluator, self, class_context, self.parent_context, func_node - ) - return self.function_execution_cls( - self, - class_context.parent_context, - bound_method, - self.var_args - ) - def create_init_executions(self): for name in self.get_function_slot_names(u'__init__'): - if isinstance(name, SelfName): - yield self._create_init_execution(name.class_context, name.tree_name.parent) + # TODO is this correct? I think we need to check for functions. + if isinstance(name, LazyInstanceClassName): + function = FunctionContext.from_context( + self.parent_context, + name.tree_name.parent + ) + bound_method = BoundMethod(self, function) + yield bound_method.get_function_execution(self.var_args) @evaluator_method_cache() def create_instance_context(self, class_context, node): @@ -184,13 +202,14 @@ class AbstractInstanceContext(Context): else: parent_context = self.create_instance_context(class_context, scope) if scope.type == 'funcdef': + func = FunctionContext.from_context( + parent_context, + scope, + ) + bound_method = BoundMethod(self, func) if scope.name.value == '__init__' and parent_context == class_context: - return self._create_init_execution(class_context, scope) + return bound_method.get_function_execution(self.var_args) else: - bound_method = BoundMethod( - self.evaluator, self, class_context, - parent_context, scope - ) return bound_method.get_function_execution() elif scope.type == 'classdef': class_context = ClassContext(self.evaluator, parent_context, scope) @@ -202,22 +221,19 @@ class AbstractInstanceContext(Context): raise NotImplementedError return class_context + def get_signatures(self): + init_funcs = self.py__getattribute__('__call__') + return [sig.bind(self) for sig in init_funcs.get_signatures()] + def __repr__(self): return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_context, self.var_args) class CompiledInstance(AbstractInstanceContext): - def __init__(self, *args, **kwargs): - super(CompiledInstance, self).__init__(*args, **kwargs) - # I don't think that dynamic append lookups should happen here. That - # sounds more like something that should go to py__iter__. - self._original_var_args = self.var_args - - if self.class_context.name.string_name in ['list', 'set'] \ - and self.parent_context.get_root_context() == self.evaluator.builtins_module: - # compare the module path with the builtin name. 
- self.var_args = iterable.get_dynamic_array_instance(self) + def __init__(self, evaluator, parent_context, class_context, var_args): + self._original_var_args = var_args + super(CompiledInstance, self).__init__(evaluator, parent_context, class_context, var_args) @property def name(self): @@ -239,99 +255,167 @@ class CompiledInstance(AbstractInstanceContext): class TreeInstance(AbstractInstanceContext): def __init__(self, evaluator, parent_context, class_context, var_args): + # I don't think that dynamic append lookups should happen here. That + # sounds more like something that should go to py__iter__. + if class_context.py__name__() in ['list', 'set'] \ + and parent_context.get_root_context() == evaluator.builtins_module: + # compare the module path with the builtin name. + if settings.dynamic_array_additions: + var_args = iterable.get_dynamic_array_instance(self, var_args) + super(TreeInstance, self).__init__(evaluator, parent_context, class_context, var_args) self.tree_node = class_context.tree_node @property def name(self): - return filters.ContextName(self, self.class_context.name.tree_name) + return ContextName(self, self.class_context.name.tree_name) + + # This can recurse, if the initialization of the class includes a reference + # to itself. + @evaluator_method_cache(default=None) + def _get_annotated_class_object(self): + from jedi.evaluate.gradual.annotation import py__annotations__, \ + infer_type_vars_for_execution + + for func in self._get_annotation_init_functions(): + # Just take the first result, it should always be one, because we + # control the typeshed code. + bound = BoundMethod(self, func) + execution = bound.get_function_execution(self.var_args) + if not execution.matches_signature(): + # First check if the signature even matches, if not we don't + # need to infer anything. + continue + + all_annotations = py__annotations__(execution.tree_node) + defined = self.class_context.define_generics( + infer_type_vars_for_execution(execution, all_annotations), + ) + debug.dbg('Inferred instance context as %s', defined, color='BLUE') + return defined + return None + + def get_annotated_class_object(self): + return self._get_annotated_class_object() or self.class_context + + def _get_annotation_init_functions(self): + filter = next(self.class_context.get_filters()) + for init_name in filter.get('__init__'): + for init in init_name.infer(): + if init.is_function(): + for signature in init.get_signatures(): + yield signature.context class AnonymousInstance(TreeInstance): - function_execution_cls = AnonymousInstanceFunctionExecution - def __init__(self, evaluator, parent_context, class_context): super(AnonymousInstance, self).__init__( evaluator, parent_context, class_context, - var_args=AnonymousArguments(), + var_args=AnonymousInstanceArguments(self), ) + def get_annotated_class_object(self): + return self.class_context # This is the default. 
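The _get_annotated_class_object logic above matches an instance's constructor arguments against an annotated __init__ (typically from typeshed) and binds the class's type variables from them. Roughly the inference being modeled, expressed with plain typing constructs (Box is a hypothetical example, not jedi API):

    from typing import Generic, TypeVar

    _T = TypeVar('_T')

    class Box(Generic[_T]):
        def __init__(self, item: _T) -> None:
            self.item = item

    # Matching the argument of Box(1) against the annotated __init__
    # binds _T to int, so the instance is treated like Box[int] and
    # completions on .item behave like completions on an int.
    box = Box(1)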
+ class CompiledInstanceName(compiled.CompiledName): - def __init__(self, evaluator, instance, parent_context, name): - super(CompiledInstanceName, self).__init__(evaluator, parent_context, name) + + def __init__(self, evaluator, instance, klass, name): + super(CompiledInstanceName, self).__init__( + evaluator, + klass.parent_context, + name.string_name + ) self._instance = instance + self._class = klass + self._class_member_name = name @iterator_to_context_set def infer(self): - for result_context in super(CompiledInstanceName, self).infer(): - is_function = result_context.api_type == 'function' - if result_context.tree_node is not None and is_function: - parent_context = result_context.parent_context - while parent_context.is_class(): - parent_context = parent_context.parent_context - - yield BoundMethod( - result_context.evaluator, self._instance, self.parent_context, - parent_context, result_context.tree_node - ) + for result_context in self._class_member_name.infer(): + if result_context.api_type == 'function': + yield CompiledBoundMethod(result_context) else: - if is_function: - yield CompiledBoundMethod(result_context) - else: - yield result_context + yield result_context -class CompiledInstanceClassFilter(compiled.CompiledObjectFilter): +class CompiledInstanceClassFilter(AbstractFilter): name_class = CompiledInstanceName - def __init__(self, evaluator, instance, compiled_object): - super(CompiledInstanceClassFilter, self).__init__( - evaluator, - compiled_object, - is_instance=True, - ) + def __init__(self, evaluator, instance, klass): + self._evaluator = evaluator self._instance = instance + self._class = klass + self._class_filter = next(klass.get_filters(is_instance=True)) - def _create_name(self, name): - return self.name_class( - self._evaluator, self._instance, self._compiled_object, name) + def get(self, name): + return self._convert(self._class_filter.get(name)) + + def values(self): + return self._convert(self._class_filter.values()) + + def _convert(self, names): + return [ + CompiledInstanceName(self._evaluator, self._instance, self._class, n) + for n in names + ] -class BoundMethod(FunctionContext): - def __init__(self, evaluator, instance, class_context, *args, **kwargs): - super(BoundMethod, self).__init__(evaluator, *args, **kwargs) - self._instance = instance - self._class_context = class_context +class BoundMethod(FunctionMixin, ContextWrapper): + def __init__(self, instance, function): + super(BoundMethod, self).__init__(function) + self.instance = instance + + def py__class__(self): + c, = contexts_from_qualified_names(self.evaluator, u'types', u'MethodType') + return c + + def _get_arguments(self, arguments): + if arguments is None: + arguments = AnonymousInstanceArguments(self.instance) + + return InstanceArguments(self.instance, arguments) def get_function_execution(self, arguments=None): - if arguments is None: - arguments = AnonymousArguments() - return AnonymousInstanceFunctionExecution( - self._instance, self.parent_context, self, arguments) - else: - return InstanceFunctionExecution( - self._instance, self.parent_context, self, arguments) + arguments = self._get_arguments(arguments) + + if isinstance(self._wrapped_context, compiled.CompiledObject): + # This is kind of weird, because it's coming from a compiled object + # and we're not sure if we want that in the future. + # TODO remove?! 
+ return FunctionExecutionContext( + self.evaluator, self.parent_context, self, arguments + ) + + return super(BoundMethod, self).get_function_execution(arguments) + + def py__call__(self, arguments): + if isinstance(self._wrapped_context, OverloadedFunctionContext): + return self._wrapped_context.py__call__(self._get_arguments(arguments)) + + function_execution = self.get_function_execution(arguments) + return function_execution.infer() + + def get_signatures(self): + return [sig.bind(self) for sig in self._wrapped_context.get_signatures()] + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_context) class CompiledBoundMethod(compiled.CompiledObject): def __init__(self, func): super(CompiledBoundMethod, self).__init__( - func.evaluator, func.access_handle, func.parent_context, func.tree_node) + func.evaluator, func.access_handle, func.parent_context) def get_param_names(self): return list(super(CompiledBoundMethod, self).get_param_names())[1:] -class InstanceNameDefinition(filters.TreeNameDefinition): - def infer(self): - return super(InstanceNameDefinition, self).infer() - - -class SelfName(filters.TreeNameDefinition): +class SelfName(TreeNameDefinition): """ This name calculates the parent_context lazily. """ @@ -345,108 +429,103 @@ class SelfName(filters.TreeNameDefinition): return self._instance.create_instance_context(self.class_context, self.tree_name) -class LazyInstanceClassName(SelfName): +class LazyInstanceClassName(object): + def __init__(self, instance, class_context, class_member_name): + self._instance = instance + self.class_context = class_context + self._class_member_name = class_member_name + @iterator_to_context_set def infer(self): - for result_context in super(LazyInstanceClassName, self).infer(): - if isinstance(result_context, FunctionContext): - # Classes are never used to resolve anything within the - # functions. Only other functions and modules will resolve - # those things. - parent_context = result_context.parent_context - while parent_context.is_class(): - parent_context = parent_context.parent_context + for result_context in self._class_member_name.infer(): + for c in apply_py__get__(result_context, self._instance, self.class_context): + yield c - yield BoundMethod( - result_context.evaluator, self._instance, self.class_context, - parent_context, result_context.tree_node - ) - else: - for c in apply_py__get__(result_context, self._instance): - yield c + def __getattr__(self, name): + return getattr(self._class_member_name, name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._class_member_name) -class InstanceClassFilter(filters.ParserTreeFilter): - name_class = LazyInstanceClassName - +class InstanceClassFilter(AbstractFilter): + """ + This filter is special in that it uses the class filter and wraps the + resulting names in LazyInstanceClassName. The idea is that the class name + filtering can be very flexible and always be reflected in instances.
+ """ def __init__(self, evaluator, context, class_context, origin_scope): - super(InstanceClassFilter, self).__init__( - evaluator=evaluator, - context=context, - node_context=class_context, - origin_scope=origin_scope - ) + self._instance = context self._class_context = class_context + self._class_filter = next(class_context.get_filters( + search_global=False, + origin_scope=origin_scope, + is_instance=True, + )) - def _equals_origin_scope(self): - node = self._origin_scope - while node is not None: - if node == self._parser_scope or node == self.context: - return True - node = get_parent_scope(node) - return False + def get(self, name): + return self._convert(self._class_filter.get(name)) - def _access_possible(self, name): - return not name.value.startswith('__') or name.value.endswith('__') \ - or self._equals_origin_scope() + def values(self): + return self._convert(self._class_filter.values()) - def _filter(self, names): - names = super(InstanceClassFilter, self)._filter(names) - return [name for name in names if self._access_possible(name)] + def _convert(self, names): + return [LazyInstanceClassName(self._instance, self._class_context, n) for n in names] - def _convert_names(self, names): - return [self.name_class(self.context, self._class_context, name) for name in names] + def __repr__(self): + return '<%s for %s>' % (self.__class__.__name__, self._class_context) -class SelfAttributeFilter(InstanceClassFilter): +class SelfAttributeFilter(ClassFilter): """ This class basically filters all the use cases where `self.*` was assigned. """ name_class = SelfName + def __init__(self, evaluator, context, class_context, origin_scope): + super(SelfAttributeFilter, self).__init__( + evaluator=evaluator, + context=context, + node_context=class_context, + origin_scope=origin_scope, + is_instance=True, + ) + self._class_context = class_context + def _filter(self, names): names = self._filter_self_names(names) - if isinstance(self._parser_scope, compiled.CompiledObject) and False: - # This would be for builtin skeletons, which are not yet supported. - return list(names) - else: - start, end = self._parser_scope.start_pos, self._parser_scope.end_pos - return [n for n in names if start < n.start_pos < end] + start, end = self._parser_scope.start_pos, self._parser_scope.end_pos + return [n for n in names if start < n.start_pos < end] def _filter_self_names(self, names): for name in names: trailer = name.parent if trailer.type == 'trailer' \ - and len(trailer.children) == 2 \ + and len(trailer.parent.children) == 2 \ and trailer.children[0] == '.': if name.is_definition() and self._access_possible(name): + # TODO filter non-self assignments. 
yield name + def _convert_names(self, names): + return [self.name_class(self.context, self._class_context, name) for name in names] + def _check_flows(self, names): return names -class InstanceVarArgs(AbstractArguments): - def __init__(self, execution_context, var_args): - self._execution_context = execution_context - self._var_args = var_args - - @memoize_method - def _get_var_args(self): - return self._var_args - - @property - def argument_node(self): - return self._var_args.argument_node - - @property - def trailer(self): - return self._var_args.trailer +class InstanceArguments(TreeArgumentsWrapper): + def __init__(self, instance, arguments): + super(InstanceArguments, self).__init__(arguments) + self.instance = instance def unpack(self, func=None): - yield None, LazyKnownContext(self._execution_context.instance) - for values in self._get_var_args().unpack(func): + yield None, LazyKnownContext(self.instance) + for values in self._wrapped_arguments.unpack(func): yield values - def get_calling_nodes(self): - return self._get_var_args().get_calling_nodes() + def get_executed_params_and_issues(self, execution_context): + if isinstance(self._wrapped_arguments, AnonymousInstanceArguments): + return self._wrapped_arguments.get_executed_params_and_issues(execution_context) + + return super(InstanceArguments, self).get_executed_params_and_issues(execution_context) diff --git a/jedi/evaluate/context/iterable.py b/jedi/evaluate/context/iterable.py index 06a769b8..81dac51d 100644 --- a/jedi/evaluate/context/iterable.py +++ b/jedi/evaluate/context/iterable.py @@ -30,26 +30,36 @@ from jedi.evaluate import recursion from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ LazyTreeContext from jedi.evaluate.helpers import get_int_or_none, is_string, \ - predefine_names, evaluate_call_of_leaf, reraise_as_evaluator, \ - EvaluatorKeyError -from jedi.evaluate.utils import safe_property -from jedi.evaluate.utils import to_list + predefine_names, evaluate_call_of_leaf, reraise_getitem_errors, \ + SimpleGetItemNotFound +from jedi.evaluate.utils import safe_property, to_list from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.helpers import execute_evaluated from jedi.evaluate.filters import ParserTreeFilter, BuiltinOverwrite, \ publish_method -from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \ - TreeContext, ContextualizedNode +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, \ + TreeContext, ContextualizedNode, iterate_contexts, HelperContextMixin from jedi.parser_utils import get_comp_fors class IterableMixin(object): def py__stop_iteration_returns(self): - return ContextSet(compiled.builtin_from_name(self.evaluator, u'None')) + return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')]) class GeneratorBase(BuiltinOverwrite, IterableMixin): array_type = None - special_object_identifier = u'GENERATOR_OBJECT' + + @memoize_method + def get_object(self): + generator, = self.evaluator.typing_module \ + .py__getattribute__('Generator') \ + .execute_annotation() + return generator + + @publish_method('__iter__') + def py__iter__(self, contextualized_node=None): + return ContextSet([self]) @publish_method('send') @publish_method('next', python_version_match=2) @@ -57,6 +67,9 @@ class GeneratorBase(BuiltinOverwrite, IterableMixin): def py__next__(self): return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + def py__stop_iteration_returns(self): + return 
ContextSet([compiled.builtin_from_name(self.evaluator, u'None')]) + @property def name(self): return compiled.CompiledContextName(self, 'generator') @@ -68,7 +81,7 @@ class Generator(GeneratorBase): super(Generator, self).__init__(evaluator) self._func_execution_context = func_execution_context - def py__iter__(self): + def py__iter__(self, contextualized_node=None): return self._func_execution_context.get_yield_lazy_contexts() def py__stop_iteration_returns(self): @@ -83,14 +96,10 @@ class CompForContext(TreeContext): def from_comp_for(cls, parent_context, comp_for): return cls(parent_context.evaluator, parent_context, comp_for) - def __init__(self, evaluator, parent_context, comp_for): - super(CompForContext, self).__init__(evaluator, parent_context) - self.tree_node = comp_for - def get_node(self): return self.tree_node - def get_filters(self, search_global, until_position=None, origin_scope=None): + def get_filters(self, search_global=False, until_position=None, origin_scope=None): yield ParserTreeFilter(self.evaluator, self) @@ -174,7 +183,7 @@ class ComprehensionMixin(object): for result in self._nested(comp_fors): yield result - def py__iter__(self): + def py__iter__(self, contextualized_node=None): for set_ in self._iterate(): yield LazyKnownContexts(set_) @@ -182,6 +191,11 @@ class ComprehensionMixin(object): return "<%s of %s>" % (type(self).__name__, self._atom) +class _DictMixin(object): + def _get_generics(self): + return tuple(c_set.py__class__() for c_set in self.get_mapping_item_contexts()) + + class Sequence(BuiltinOverwrite, IterableMixin): api_type = u'instance' @@ -189,11 +203,15 @@ class Sequence(BuiltinOverwrite, IterableMixin): def name(self): return compiled.CompiledContextName(self, self.array_type) + def _get_generics(self): + return (self.merge_types_of_iterate().py__class__(),) + @memoize_method def get_object(self): - compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type) - only_obj, = compiled_obj.execute_evaluated(self) - return only_obj + from jedi.evaluate.gradual.typing import AnnotatedSubClass + klass = compiled.builtin_from_name(self.evaluator, self.array_type) + # TODO is this execute annotation wrong? it returns a context set?! + return AnnotatedSubClass(klass, self._get_generics()).execute_annotation() def py__bool__(self): return None # We don't know the length, because of appends. 
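For Sequence above, _get_generics() merges the classes of everything the literal can yield into a single generic parameter, so the wrapped builtin behaves like a parameterized typeshed class. A rough sketch of that merge on runtime values (the helper name is hypothetical; jedi performs the analogous merge on inferred contexts, not concrete values):

    def merged_element_classes(values):
        # [1, 2] -> {int}; [1, 'a'] -> {int, str}
        return {type(v) for v in values}

    assert merged_element_classes([1, 2]) == {int}
    assert merged_element_classes([1, 'a']) == {int, str}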
@@ -205,22 +223,21 @@ class Sequence(BuiltinOverwrite, IterableMixin): def parent(self): return self.evaluator.builtins_module - def dict_values(self): - return ContextSet.from_sets( - self._defining_context.eval_node(v) - for k, v in self._items() - ) + def py__getitem__(self, index_context_set, contextualized_node): + if self.array_type == 'dict': + return self._dict_values() + return iterate_contexts(ContextSet([self])) class ListComprehension(ComprehensionMixin, Sequence): array_type = u'list' - def py__getitem__(self, index): + def py__simple_getitem__(self, index): if isinstance(index, slice): - return ContextSet(self) + return ContextSet([self]) all_types = list(self.py__iter__()) - with reraise_as_evaluator(IndexError, TypeError): + with reraise_getitem_errors(IndexError, TypeError): lazy_context = all_types[index] return lazy_context.infer() @@ -229,31 +246,34 @@ class SetComprehension(ComprehensionMixin, Sequence): array_type = u'set' -class DictComprehension(ComprehensionMixin, Sequence): +class DictComprehension(_DictMixin, ComprehensionMixin, Sequence): array_type = u'dict' def _get_comp_for(self): return self._get_comprehension().children[3] - def py__iter__(self): + def py__iter__(self, contextualized_node=None): for keys, values in self._iterate(): yield LazyKnownContexts(keys) - def py__getitem__(self, index): + def py__simple_getitem__(self, index): for keys, values in self._iterate(): for k in keys: if isinstance(k, compiled.CompiledObject): if k.get_safe_value(default=object()) == index: return values - return self.dict_values() + raise SimpleGetItemNotFound() - def dict_values(self): + def _dict_keys(self): + return ContextSet.from_sets(keys for keys, values in self._iterate()) + + def _dict_values(self): return ContextSet.from_sets(values for keys, values in self._iterate()) @publish_method('values') def _imitate_values(self): - lazy_context = LazyKnownContexts(self.dict_values()) - return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) + lazy_context = LazyKnownContexts(self._dict_values()) + return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])]) @publish_method('items') def _imitate_items(self): @@ -269,7 +289,15 @@ class DictComprehension(ComprehensionMixin, Sequence): for key, value in self._iterate() ] - return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts)) + return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)]) + + def get_mapping_item_contexts(self): + return self._dict_keys(), self._dict_values() + + def exact_key_items(self): + # NOTE: A smarter thing can probably be done here to achieve better + # completions, but at least like this jedi doesn't crash + return [] class GeneratorComprehension(ComprehensionMixin, GeneratorBase): @@ -277,6 +305,7 @@ class SequenceLiteralContext(Sequence): + _TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist' mapping = {'(': u'tuple', '[': u'list', '{': u'set'} @@ -286,63 +315,69 @@ self.atom = atom self._defining_context = defining_context - if self.atom.type in ('testlist_star_expr', 'testlist'): + if self.atom.type in self._TUPLE_LIKE: self.array_type = u'tuple' else: self.array_type = SequenceLiteralContext.mapping[atom.children[0]] """The builtin name of the array (list, set, tuple or dict).""" - def py__getitem__(self, index): + def py__simple_getitem__(self, index): """Here the index is an int/str.
Raises IndexError/KeyError.""" if self.array_type == u'dict': compiled_obj_index = compiled.create_simple_object(self.evaluator, index) - for key, value in self._items(): + for key, value in self.get_tree_entries(): for k in self._defining_context.eval_node(key): - if isinstance(k, compiled.CompiledObject) \ - and k.execute_operation(compiled_obj_index, u'==').get_safe_value(): - return self._defining_context.eval_node(value) - raise EvaluatorKeyError('No key found in dictionary %s.' % self) + try: + method = k.execute_operation + except AttributeError: + pass + else: + if method(compiled_obj_index, u'==').get_safe_value(): + return self._defining_context.eval_node(value) + raise SimpleGetItemNotFound('No key found in dictionary %s.' % self) - # Can raise an IndexError if isinstance(index, slice): - return ContextSet(self) + return ContextSet([self]) else: - with reraise_as_evaluator(TypeError, KeyError, IndexError): - node = self._items()[index] + with reraise_getitem_errors(TypeError, KeyError, IndexError): + node = self.get_tree_entries()[index] return self._defining_context.eval_node(node) - def py__iter__(self): + def py__iter__(self, contextualized_node=None): """ While values returns the possible values for any array field, this function returns the value for a certain index. """ if self.array_type == u'dict': # Get keys. - types = ContextSet() - for k, _ in self._items(): + types = NO_CONTEXTS + for k, _ in self.get_tree_entries(): types |= self._defining_context.eval_node(k) # We don't know which dict index comes first, therefore always # yield all the types. for _ in types: yield LazyKnownContexts(types) else: - for node in self._items(): + for node in self.get_tree_entries(): yield LazyTreeContext(self._defining_context, node) for addition in check_array_additions(self._defining_context, self): yield addition - def _values(self): - """Returns a list of a list of node.""" - if self.array_type == u'dict': - return ContextSet.from_sets(v for k, v in self._items()) - else: - return self._items() + def py__len__(self): + # This function is not really used often. It's more of a try. + return len(self.get_tree_entries()) - def _items(self): + def _dict_values(self): + return ContextSet.from_sets( + self._defining_context.eval_node(v) + for k, v in self.get_tree_entries() + ) + + def get_tree_entries(self): c = self.atom.children - if self.atom.type in ('testlist_star_expr', 'testlist'): + if self.atom.type in self._TUPLE_LIKE: return c[::2] array_node = c[1] @@ -350,28 +385,46 @@ class SequenceLiteralContext(Sequence): return [] # Direct closing bracket, doesn't contain items. if array_node.type == 'testlist_comp': - return array_node.children[::2] + # filter out (for now) pep 448 single-star unpacking + return [value for value in array_node.children[::2] + if value.type != "star_expr"] elif array_node.type == 'dictorsetmaker': kv = [] iterator = iter(array_node.children) for key in iterator: - op = next(iterator, None) - if op is None or op == ',': - kv.append(key) # A set. - else: - assert op == ':' # A dict. - kv.append((key, next(iterator))) + if key == "**": + # dict with pep 448 double-star unpacking + # for now ignoring the values imported by ** + next(iterator) next(iterator, None) # Possible comma. + else: + op = next(iterator, None) + if op is None or op == ',': + if key.type == "star_expr": + # pep 448 single-star unpacking + # for now ignoring values imported by * + pass + else: + kv.append(key) # A set. + else: + assert op == ':' # A dict. 
+ kv.append((key, next(iterator))) + next(iterator, None) # Possible comma. return kv else: - return [array_node] + if array_node.type == "star_expr": + # pep 448 single-star unpacking + # for now ignoring values imported by * + return [] + else: + return [array_node] def exact_key_items(self): """ Returns a generator of tuples like dict.items(), where the key is resolved (as a string) and the values are still lazy contexts. """ - for key_node, value in self._items(): + for key_node, value in self.get_tree_entries(): for key in self._defining_context.eval_node(key_node): if is_string(key): yield key.get_safe_value(), LazyTreeContext(self._defining_context, value) @@ -380,7 +433,7 @@ class SequenceLiteralContext(Sequence): return "<%s of %s>" % (self.__class__.__name__, self.atom) -class DictLiteralContext(SequenceLiteralContext): +class DictLiteralContext(_DictMixin, SequenceLiteralContext): array_type = u'dict' def __init__(self, evaluator, defining_context, atom): @@ -390,8 +443,8 @@ class DictLiteralContext(SequenceLiteralContext): @publish_method('values') def _imitate_values(self): - lazy_context = LazyKnownContexts(self.dict_values()) - return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) + lazy_context = LazyKnownContexts(self._dict_values()) + return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])]) @publish_method('items') def _imitate_items(self): @@ -400,10 +453,19 @@ class DictLiteralContext(SequenceLiteralContext): self.evaluator, u'tuple', (LazyTreeContext(self._defining_context, key_node), LazyTreeContext(self._defining_context, value_node)) - )) for key_node, value_node in self._items() + )) for key_node, value_node in self.get_tree_entries() ] - return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts)) + return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)]) + + def _dict_keys(self): + return ContextSet.from_sets( + self._defining_context.eval_node(k) + for k, v in self.get_tree_entries() + ) + + def get_mapping_item_contexts(self): + return self._dict_keys(), self._dict_values() class _FakeArray(SequenceLiteralContext): @@ -422,12 +484,12 @@ class FakeSequence(_FakeArray): super(FakeSequence, self).__init__(evaluator, None, array_type) self._lazy_context_list = lazy_context_list - def py__getitem__(self, index): - with reraise_as_evaluator(IndexError, TypeError): + def py__simple_getitem__(self, index): + with reraise_getitem_errors(IndexError, TypeError): lazy_context = self._lazy_context_list[index] return lazy_context.infer() - def py__iter__(self): + def py__iter__(self, contextualized_node=None): return self._lazy_context_list def py__bool__(self): @@ -437,16 +499,16 @@ class FakeSequence(_FakeArray): return "<%s of %s>" % (type(self).__name__, self._lazy_context_list) -class FakeDict(_FakeArray): +class FakeDict(_DictMixin, _FakeArray): def __init__(self, evaluator, dct): super(FakeDict, self).__init__(evaluator, dct, u'dict') self._dct = dct - def py__iter__(self): + def py__iter__(self, contextualized_node=None): for key in self._dct: yield LazyKnownContext(compiled.create_simple_object(self.evaluator, key)) - def py__getitem__(self, index): + def py__simple_getitem__(self, index): if is_py3 and self.evaluator.environment.version_info.major == 2: # In Python 2 bytes and unicode compare. 
if isinstance(index, bytes): @@ -462,20 +524,26 @@ class FakeDict(_FakeArray): except KeyError: pass - with reraise_as_evaluator(KeyError): + with reraise_getitem_errors(KeyError): lazy_context = self._dct[index] return lazy_context.infer() @publish_method('values') def _values(self): - return ContextSet(FakeSequence( + return ContextSet([FakeSequence( self.evaluator, u'tuple', - [LazyKnownContexts(self.dict_values())] - )) + [LazyKnownContexts(self._dict_values())] + )]) - def dict_values(self): + def _dict_values(self): return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values()) + def _dict_keys(self): + return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) + + def get_mapping_item_contexts(self): + return self._dict_keys(), self._dict_values() + def exact_key_items(self): return self._dct.items() @@ -485,17 +553,17 @@ class MergedArray(_FakeArray): super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type) self._arrays = arrays - def py__iter__(self): + def py__iter__(self, contextualized_node=None): for array in self._arrays: for lazy_context in array.py__iter__(): yield lazy_context - def py__getitem__(self, index): + def py__simple_getitem__(self, index): return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) - def _items(self): + def get_tree_entries(self): for array in self._arrays: - for a in array._items(): + for a in array.get_tree_entries(): yield a def __len__(self): @@ -566,7 +634,7 @@ def _check_array_additions(context, sequence): module_context = context.get_root_context() if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject): debug.dbg('Dynamic array search aborted.', color='MAGENTA') - return ContextSet() + return NO_CONTEXTS def find_additions(context, arglist, add_name): params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack()) @@ -574,8 +642,8 @@ def _check_array_additions(context, sequence): if add_name in ['insert']: params = params[1:] if add_name in ['append', 'add', 'insert']: - for key, whatever in params: - result.add(whatever) + for key, lazy_context in params: + result.add(lazy_context) elif add_name in ['extend', 'update']: for key, lazy_context in params: result |= set(lazy_context.infer().iterate()) @@ -634,32 +702,28 @@ def _check_array_additions(context, sequence): return added_types -def get_dynamic_array_instance(instance): +def get_dynamic_array_instance(instance, arguments): """Used for set() and list() instances.""" - if not settings.dynamic_array_additions: - return instance.var_args - - ai = _ArrayInstance(instance) + ai = _ArrayInstance(instance, arguments) from jedi.evaluate import arguments - return arguments.ValuesArguments([ContextSet(ai)]) + return arguments.ValuesArguments([ContextSet([ai])]) -class _ArrayInstance(object): +class _ArrayInstance(HelperContextMixin): """ Used for the usage of set() and list(). This is definitely a hack, but a good one :-) It makes it possible to use set/list conversions. - - In contrast to Array, ListComprehension and all other iterable types, this - is something that is only used inside `evaluate/compiled/fake/builtins.py` - and therefore doesn't need filters, `py__bool__` and so on, because - we don't use these operations in `builtins.py`. 
""" - def __init__(self, instance): + def __init__(self, instance, var_args): self.instance = instance - self.var_args = instance.var_args + self.var_args = var_args - def py__iter__(self): + def py__class__(self): + tuple_, = self.instance.evaluator.builtins_module.py__getattribute__('tuple') + return tuple_ + + def py__iter__(self, contextualized_node=None): var_args = self.var_args try: _, lazy_context = next(var_args.unpack()) @@ -676,21 +740,24 @@ class _ArrayInstance(object): yield addition def iterate(self, contextualized_node=None, is_async=False): - return self.py__iter__() + return self.py__iter__(contextualized_node) -class Slice(Context): +class Slice(object): def __init__(self, context, start, stop, step): - super(Slice, self).__init__( - context.evaluator, - parent_context=context.evaluator.builtins_module - ) self._context = context - # all of them are either a Precedence or None. + self._slice_object = None + # All of them are either a Precedence or None. self._start = start self._stop = stop self._step = step + def __getattr__(self, name): + if self._slice_object is None: + context = compiled.builtin_from_name(self._context.evaluator, 'slice') + self._slice_object, = execute_evaluated(context) + return getattr(self._slice_object, name) + @property def obj(self): """ diff --git a/jedi/evaluate/context/klass.py b/jedi/evaluate/context/klass.py index 31572501..65383d3c 100644 --- a/jedi/evaluate/context/klass.py +++ b/jedi/evaluate/context/klass.py @@ -17,91 +17,132 @@ and others. Here's a list: ====================================== ======================================== **Method** **Description** -------------------------------------- ---------------------------------------- -py__call__(params: Array) On callable objects, returns types. +py__call__(arguments: Array) On callable objects, returns types. py__bool__() Returns True/False/None; None means that there's no certainty. py__bases__() Returns a list of base classes. -py__mro__() Returns a list of classes (the mro). py__iter__() Returns a generator of a set of types. py__class__() Returns the class of an instance. -py__getitem__(index: int/str) Returns a a set of types of the index. +py__simple_getitem__(index: int/str) Returns a a set of types of the index. Can raise an IndexError/KeyError. +py__getitem__(indexes: ContextSet) Returns a a set of types of the index. py__file__() Only on modules. Returns None if does not exist. -py__package__() Only on modules. For the import system. +py__package__() -> List[str] Only on modules. For the import system. py__path__() Only on modules. For the import system. py__get__(call_object) Only on instances. Simulates descriptors. -py__doc__(include_call_signature: Returns the docstring for a context. - bool) +py__doc__() Returns the docstring for a context. 
====================================== ======================================== """ +from jedi import debug from jedi._compatibility import use_metaclass -from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass +from jedi.parser_utils import get_parent_scope +from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass, \ + evaluator_method_generator_cache from jedi.evaluate import compiled -from jedi.evaluate.lazy_context import LazyKnownContext -from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \ - ContextName, AnonymousInstanceParamName +from jedi.evaluate.lazy_context import LazyKnownContexts +from jedi.evaluate.filters import ParserTreeFilter +from jedi.evaluate.names import TreeNameDefinition, ContextName +from jedi.evaluate.arguments import unpack_arglist from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \ - TreeContext + TreeContext, NO_CONTEXTS +from jedi.evaluate.context.function import FunctionAndClassMixin -def apply_py__get__(context, base_context): +def apply_py__get__(context, instance, class_context): try: method = context.py__get__ except AttributeError: yield context else: - for descriptor_context in method(base_context): + for descriptor_context in method(instance, class_context): yield descriptor_context class ClassName(TreeNameDefinition): - def __init__(self, parent_context, tree_name, name_context): + def __init__(self, parent_context, tree_name, name_context, apply_decorators): super(ClassName, self).__init__(parent_context, tree_name) self._name_context = name_context + self._apply_decorators = apply_decorators @iterator_to_context_set def infer(self): - # TODO this _name_to_types might get refactored and be a part of the - # parent class. Once it is, we can probably just overwrite method to - # achieve this. + # We're using a different context to infer, so we cannot call super(). 
from jedi.evaluate.syntax_tree import tree_name_to_contexts inferred = tree_name_to_contexts( self.parent_context.evaluator, self._name_context, self.tree_name) for result_context in inferred: - for c in apply_py__get__(result_context, self.parent_context): - yield c + if self._apply_decorators: + for c in apply_py__get__(result_context, + instance=None, + class_context=self.parent_context): + yield c + else: + yield result_context class ClassFilter(ParserTreeFilter): name_class = ClassName + def __init__(self, *args, **kwargs): + self._is_instance = kwargs.pop('is_instance') # Python 2 :/ + super(ClassFilter, self).__init__(*args, **kwargs) + def _convert_names(self, names): - return [self.name_class(self.context, name, self._node_context) - for name in names] + return [ + self.name_class( + parent_context=self.context, + tree_name=name, + name_context=self._node_context, + apply_decorators=not self._is_instance, + ) for name in names + ] + + def _equals_origin_scope(self): + node = self._origin_scope + while node is not None: + if node == self._parser_scope or node == self.context: + return True + node = get_parent_scope(node) + return False + + def _access_possible(self, name): + return not name.value.startswith('__') or name.value.endswith('__') \ + or self._equals_origin_scope() + + def _filter(self, names): + names = super(ClassFilter, self)._filter(names) + return [name for name in names if self._access_possible(name)] -class ClassContext(use_metaclass(CachedMetaClass, TreeContext)): - """ - This class is not only important to extend `tree.Class`, it is also a - important for descriptors (if the descriptor methods are evaluated or not). - """ - api_type = u'class' +class ClassMixin(FunctionAndClassMixin): + def is_class(self): + return True - def __init__(self, evaluator, parent_context, classdef): - super(ClassContext, self).__init__(evaluator, parent_context=parent_context) - self.tree_node = classdef + def py__call__(self, arguments): + from jedi.evaluate.context import TreeInstance + return ContextSet([TreeInstance(self.evaluator, self.parent_context, self, arguments)]) - @evaluator_method_cache(default=()) + def py__class__(self): + return compiled.builtin_from_name(self.evaluator, u'type') + + @property + def name(self): + return ContextName(self, self.tree_node.name) + + def get_param_names(self): + for context_ in self.py__getattribute__(u'__init__'): + if context_.is_function(): + return list(context_.get_param_names())[1:] + return [] + + @evaluator_method_generator_cache() def py__mro__(self): - def add(cls): - if cls not in mro: - mro.append(cls) - mro = [self] + yield self # TODO Do a proper mro resolution. Currently we are just listing # classes. However, it's a complicated algorithm. 
for lazy_cls in self.py__bases__(): @@ -124,36 +165,15 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)): File "<stdin>", line 1, in <module> TypeError: int() takes at most 2 arguments (3 given) """ - pass + debug.warning('Super class of %s is not a class: %s', self, cls) else: - add(cls) for cls_new in mro_method(): - add(cls_new) - return tuple(mro) + if cls_new not in mro: + mro.append(cls_new) + yield cls_new - @evaluator_method_cache(default=()) - def py__bases__(self): - arglist = self.tree_node.get_super_arglist() - if arglist: - from jedi.evaluate import arguments - args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist) - return [value for key, value in args.unpack() if key is None] - else: - return [LazyKnownContext(compiled.builtin_from_name(self.evaluator, u'object'))] - - def py__call__(self, params): - from jedi.evaluate.context import TreeInstance - return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params)) - - def py__class__(self): - return compiled.builtin_from_name(self.evaluator, u'type') - - def get_params(self): - from jedi.evaluate.context import AnonymousInstance - anon = AnonymousInstance(self.evaluator, self.parent_context, self) - return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.get_params()] - - def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False): + def get_filters(self, search_global=False, until_position=None, + origin_scope=None, is_instance=False): if search_global: yield ParserTreeFilter( self.evaluator, @@ -169,29 +189,92 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)): else: yield ClassFilter( self.evaluator, self, node_context=cls, - origin_scope=origin_scope) + origin_scope=origin_scope, + is_instance=is_instance + ) + if not is_instance: + from jedi.evaluate.compiled import builtin_from_name + type_ = builtin_from_name(self.evaluator, u'type') + if type_ != self: + yield next(type_.get_filters()) - def is_class(self): - return True - def get_function_slot_names(self, name): - for filter in self.get_filters(search_global=False): - names = filter.get(name) - if names: - return names - return [] +class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, TreeContext)): + """ + This class is not only important to extend `tree.Class`, it is also + important for descriptors (if the descriptor methods are evaluated or not). + """ + api_type = u'class' - def get_param_names(self): - for name in self.get_function_slot_names(u'__init__'): - for context_ in name.infer(): - try: - method = context_.get_param_names - except AttributeError: - pass - else: - return list(method())[1:] - return [] + @evaluator_method_cache() + def list_type_vars(self): + found = [] + arglist = self.tree_node.get_super_arglist() + if arglist is None: + return [] - @property - def name(self): - return ContextName(self, self.tree_node.name) + for stars, node in unpack_arglist(arglist): + if stars: + continue # These are not relevant for this search. + + from jedi.evaluate.gradual.annotation import find_unknown_type_vars + for type_var in find_unknown_type_vars(self.parent_context, node): + if type_var not in found: + # The order matters and it's therefore a list.
+ found.append(type_var) + return found + + @evaluator_method_cache(default=()) + def py__bases__(self): + arglist = self.tree_node.get_super_arglist() + if arglist: + from jedi.evaluate import arguments + args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist) + return [value for key, value in args.unpack() if key is None] + else: + if self.py__name__() == 'object' \ + and self.parent_context == self.evaluator.builtins_module: + return [] + return [LazyKnownContexts( + self.evaluator.builtins_module.py__getattribute__('object') + )] + + def py__getitem__(self, index_context_set, contextualized_node): + from jedi.evaluate.gradual.typing import AnnotatedClass + if not index_context_set: + return ContextSet([self]) + return ContextSet( + AnnotatedClass( + self, + index_context, + context_of_index=contextualized_node.context, + ) + for index_context in index_context_set + ) + + def define_generics(self, type_var_dict): + from jedi.evaluate.gradual.typing import AnnotatedSubClass + + def remap_type_vars(): + """ + The TypeVars in the resulting classes have sometimes different names + and we need to check for that, e.g. a signature can be: + + def iter(iterable: Iterable[_T]) -> Iterator[_T]: ... + + However, the iterator is defined as Iterator[_T_co], which means it has + a different type var name. + """ + for type_var in self.list_type_vars(): + yield type_var_dict.get(type_var.py__name__(), NO_CONTEXTS) + + if type_var_dict: + return AnnotatedSubClass( + self, + given_types=tuple(remap_type_vars()) + ) + return self + + def get_signatures(self): + init_funcs = self.py__getattribute__('__init__') + return [sig.bind(self) for sig in init_funcs.get_signatures()] diff --git a/jedi/evaluate/context/module.py b/jedi/evaluate/context/module.py index 03fb0e55..8c44d5bb 100644 --- a/jedi/evaluate/context/module.py +++ b/jedi/evaluate/context/module.py @@ -1,15 +1,16 @@ import re import os - -from parso import python_bytes_to_unicode +import logging from jedi.evaluate.cache import evaluator_method_cache -from jedi._compatibility import iter_modules, all_suffixes -from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \ - AbstractNameDefinition, ParserTreeFilter, DictFilter, MergedFilter +from jedi.evaluate.names import ContextNameMixin, AbstractNameDefinition +from jedi.evaluate.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter from jedi.evaluate import compiled from jedi.evaluate.base_context import TreeContext -from jedi.evaluate.imports import SubModuleName, infer_import +from jedi.evaluate.names import SubModuleName +from jedi.evaluate.helpers import contexts_from_qualified_names + +logger = logging.getLogger(__name__) class _ModuleAttributeName(AbstractNameDefinition): @@ -38,17 +39,57 @@ class ModuleName(ContextNameMixin, AbstractNameDefinition): return self._name -class ModuleContext(TreeContext): - api_type = u'module' - parent_context = None +def iter_module_names(evaluator, paths): + # Python modules/packages + for n in evaluator.compiled_subprocess.list_module_names(paths): + yield n - def __init__(self, evaluator, module_node, path, code_lines): - super(ModuleContext, self).__init__(evaluator, parent_context=None) - self.tree_node = module_node - self._path = path - self.code_lines = code_lines + for path in paths: + try: + dirs = os.listdir(path) + except OSError: + # The file might not exist or reading it might lead to an error. 
+ logger.error("Not possible to list directory: %s", path) + continue + for name in dirs: + # Namespaces + if os.path.isdir(os.path.join(path, name)): + # pycache is obviously not an interestin namespace. Also the + # name must be a valid identifier. + # TODO use str.isidentifier, once Python 2 is removed + if name != '__pycache__' and not re.search('\W|^\d', name): + yield name + # Stub files + if name.endswith('.pyi'): + if name != '__init__.pyi': + yield name[:-4] - def get_filters(self, search_global, until_position=None, origin_scope=None): + +class SubModuleDictMixin(object): + @evaluator_method_cache() + def sub_modules_dict(self): + """ + Lists modules in the directory of this module (if this module is a + package). + """ + names = {} + try: + method = self.py__path__ + except AttributeError: + pass + else: + mods = iter_module_names(self.evaluator, method()) + for name in mods: + # It's obviously a relative import to the current module. + names[name] = SubModuleName(self, name) + + # In the case of an import like `from x.` we don't need to + # add all the variables, this is only about submodules. + return names + + +class ModuleMixin(SubModuleDictMixin): + def get_filters(self, search_global=False, until_position=None, origin_scope=None): yield MergedFilter( ParserTreeFilter( self.evaluator, @@ -58,8 +99,46 @@ class ModuleContext(TreeContext): ), GlobalNameFilter(self, self.tree_node), ) - yield DictFilter(self._sub_modules_dict()) + yield DictFilter(self.sub_modules_dict()) yield DictFilter(self._module_attributes_dict()) + for star_filter in self.iter_star_filters(): + yield star_filter + + def py__class__(self): + c, = contexts_from_qualified_names(self.evaluator, u'types', u'ModuleType') + return c + + def is_module(self): + return True + + def is_stub(self): + return False + + @property + @evaluator_method_cache() + def name(self): + return ModuleName(self, self._string_name) + + @property + def _string_name(self): + """ This is used for the goto functions. """ + # TODO It's ugly that we even use this, the name is usually well known + # ahead so just pass it when create a ModuleContext. + if self._path is None: + return '' # no path -> empty name + else: + sep = (re.escape(os.path.sep),) * 2 + r = re.search(r'([^%s]*?)(%s__init__)?(\.pyi?|\.so)?$' % sep, self._path) + # Remove PEP 3149 names + return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) + + @evaluator_method_cache() + def _module_attributes_dict(self): + names = ['__file__', '__package__', '__doc__', '__name__'] + # All the additional module attributes are strings. + return dict((n, _ModuleAttributeName(self, n)) for n in names) + + def iter_star_filters(self, search_global=False): for star_module in self.star_imports(): yield next(star_module.get_filters(search_global)) @@ -68,6 +147,8 @@ class ModuleContext(TreeContext): # to push the star imports into Evaluator.module_cache, if we reenable this. @evaluator_method_cache([]) def star_imports(self): + from jedi.evaluate.imports import infer_import + modules = [] for i in self.tree_node.iter_imports(): if i.is_star_import(): @@ -79,47 +160,42 @@ class ModuleContext(TreeContext): modules += new return modules - @evaluator_method_cache() - def _module_attributes_dict(self): - names = ['__file__', '__package__', '__doc__', '__name__'] - # All the additional module attributes are strings. - return dict((n, _ModuleAttributeName(self, n)) for n in names) - - @property - def _string_name(self): - """ This is used for the goto functions. 
""" - if self._path is None: - return '' # no path -> empty name - else: - sep = (re.escape(os.path.sep),) * 2 - r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path) - # Remove PEP 3149 names - return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) - - @property - @evaluator_method_cache() - def name(self): - return ModuleName(self, self._string_name) - - def _get_init_directory(self): + def get_qualified_names(self): """ - :return: The path to the directory of a package. None in case it's not - a package. + A module doesn't have a qualified name, but it's important to note that + it's reachable and not `None`. With this information we can add + qualified names on top for all context children. """ - for suffix in all_suffixes(): - ending = '__init__' + suffix - py__file__ = self.py__file__() - if py__file__ is not None and py__file__.endswith(ending): - # Remove the ending, including the separator. - return self.py__file__()[:-len(ending) - 1] - return None + return [] + + +class ModuleContext(ModuleMixin, TreeContext): + api_type = u'module' + parent_context = None + + def __init__(self, evaluator, module_node, path, string_names, code_lines, is_package=False): + super(ModuleContext, self).__init__( + evaluator, + parent_context=None, + tree_node=module_node + ) + self._path = path + self.string_names = string_names + self.code_lines = code_lines + self.is_package = is_package + + def is_stub(self): + if self._path is not None and self._path.endswith('.pyi'): + # Currently this is the way how we identify stubs when e.g. goto is + # used in them. This could be changed if stubs would be identified + # sooner and used as StubModuleContext. + return True + return super(ModuleContext, self).is_stub() def py__name__(self): - for name, module in self.evaluator.module_cache.iterate_modules_with_names(): - if module == self and name != '': - return name - - return '__main__' + if self.string_names is None: + return None + return '.'.join(self.string_names) def py__file__(self): """ @@ -131,35 +207,34 @@ class ModuleContext(TreeContext): return os.path.abspath(self._path) def py__package__(self): - if self._get_init_directory() is None: - return re.sub(r'\.?[^.]+$', '', self.py__name__()) - else: - return self.py__name__() + if self.is_package: + return self.string_names + return self.string_names[:-1] def _py__path__(self): - search_path = self.evaluator.get_sys_path() - init_path = self.py__file__() - if os.path.basename(init_path) == '__init__.py': - with open(init_path, 'rb') as f: - content = python_bytes_to_unicode(f.read(), errors='replace') - # these are strings that need to be used for namespace packages, - # the first one is ``pkgutil``, the second ``pkg_resources``. - options = ('declare_namespace(__name__)', 'extend_path(__path__') - if options[0] in content or options[1] in content: - # It is a namespace, now try to find the rest of the - # modules on sys_path or whatever the search_path is. - paths = set() - for s in search_path: - other = os.path.join(s, self.name.string_name) - if os.path.isdir(other): - paths.add(other) - if paths: - return list(paths) - # TODO I'm not sure if this is how nested namespace - # packages work. The tests are not really good enough to - # show that. - # Default to this. - return [self._get_init_directory()] + # A namespace package is typically auto generated and ~10 lines long. 
+        first_few_lines = ''.join(self.code_lines[:50])
+        # these are strings that need to be used for namespace packages,
+        # the first one is ``pkgutil``, the second ``pkg_resources``.
+        options = ('declare_namespace(__name__)', 'extend_path(__path__')
+        if options[0] in first_few_lines or options[1] in first_few_lines:
+            # It is a namespace, now try to find the rest of the
+            # modules on sys_path or whatever the search_path is.
+            paths = set()
+            for s in self.evaluator.get_sys_path():
+                other = os.path.join(s, self.name.string_name)
+                if os.path.isdir(other):
+                    paths.add(other)
+            if paths:
+                return list(paths)
+            # Nested namespace packages will not be supported. Nobody ever
+            # asked for it and in Python 3 they are there without using all the
+            # crap above.
+
+        # Default to the directory of this file.
+        file = self.py__file__()
+        assert file is not None  # Shouldn't be a package in the first place.
+        return [os.path.dirname(file)]

     @property
     def py__path__(self):
@@ -173,44 +248,14 @@ class ModuleContext(TreeContext):
         is a list of paths (strings).
         Raises an AttributeError if the module is not a package.
         """
-        path = self._get_init_directory()
-
-        if path is None:
-            raise AttributeError('Only packages have __path__ attributes.')
-        else:
+        if self.is_package:
             return self._py__path__
-
-    @evaluator_method_cache()
-    def _sub_modules_dict(self):
-        """
-        Lists modules in the directory of this module (if this module is a
-        package).
-        """
-        names = {}
-        try:
-            method = self.py__path__
-        except AttributeError:
-            pass
-        else:
-            for path in method():
-                mods = iter_modules([path])
-                for module_loader, name, is_pkg in mods:
-                    # It's obviously a relative import to the current module.
-                    names[name] = SubModuleName(self, name)
-
-            # TODO add something like this in the future, its cleaner than the
-            # import hacks.
-            # ``os.path`` is a hardcoded exception, because it's a
-            # ``sys.modules`` modification.
- # if str(self.name) == 'os': - # names.append(Name('path', parent_context=self)) - - return names - - def py__class__(self): - return compiled.get_special_object(self.evaluator, u'MODULE_CLASS') + raise AttributeError('Only packages have __path__ attributes.') def __repr__(self): - return "<%s: %s@%s-%s>" % ( + return "<%s: %s@%s-%s is_stub=%s>" % ( self.__class__.__name__, self._string_name, - self.tree_node.start_pos[0], self.tree_node.end_pos[0]) + self.tree_node.start_pos[0], self.tree_node.end_pos[0], + self._path is not None and self.is_stub() + ) diff --git a/jedi/evaluate/context/namespace.py b/jedi/evaluate/context/namespace.py index c6ff1e63..12c8b3a6 100644 --- a/jedi/evaluate/context/namespace.py +++ b/jedi/evaluate/context/namespace.py @@ -1,10 +1,8 @@ -import os -from itertools import chain - from jedi.evaluate.cache import evaluator_method_cache -from jedi.evaluate import imports -from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, ContextNameMixin -from jedi.evaluate.base_context import TreeContext, ContextSet +from jedi.evaluate.filters import DictFilter +from jedi.evaluate.names import ContextNameMixin, AbstractNameDefinition +from jedi.evaluate.base_context import Context +from jedi.evaluate.context.module import SubModuleDictMixin class ImplicitNSName(ContextNameMixin, AbstractNameDefinition): @@ -17,7 +15,7 @@ class ImplicitNSName(ContextNameMixin, AbstractNameDefinition): self.string_name = string_name -class ImplicitNamespaceContext(TreeContext): +class ImplicitNamespaceContext(Context, SubModuleDictMixin): """ Provides support for implicit namespace packages """ @@ -31,15 +29,15 @@ class ImplicitNamespaceContext(TreeContext): super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None) self.evaluator = evaluator self._fullname = fullname - self.paths = paths + self._paths = paths - def get_filters(self, search_global, until_position=None, origin_scope=None): - yield DictFilter(self._sub_modules_dict()) + def get_filters(self, search_global=False, until_position=None, origin_scope=None): + yield DictFilter(self.sub_modules_dict()) @property @evaluator_method_cache() def name(self): - string_name = self.py__package__().rpartition('.')[-1] + string_name = self.py__package__()[-1] return ImplicitNSName(self, string_name) def py__file__(self): @@ -48,25 +46,19 @@ class ImplicitNamespaceContext(TreeContext): def py__package__(self): """Return the fullname """ - return self._fullname + return self._fullname.split('.') def py__path__(self): - return [self.paths] + return self._paths def py__name__(self): return self._fullname - @evaluator_method_cache() - def _sub_modules_dict(self): - names = {} + def is_namespace(self): + return True - file_names = chain.from_iterable(os.listdir(path) for path in self.paths) - mods = [ - file_name.rpartition('.')[0] if '.' 
in file_name else file_name
-            for file_name in file_names
-            if file_name != '__pycache__'
-        ]
+    def is_stub(self):
+        return False

-        for name in mods:
-            names[name] = imports.SubModuleName(self, name)
-        return names
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self._fullname)
diff --git a/jedi/evaluate/docstrings.py b/jedi/evaluate/docstrings.py
index 86b56673..8d2df039 100644
--- a/jedi/evaluate/docstrings.py
+++ b/jedi/evaluate/docstrings.py
@@ -21,6 +21,7 @@ from textwrap import dedent
 from parso import parse, ParserSyntaxError

 from jedi._compatibility import u
+from jedi import debug
 from jedi.evaluate.utils import indent_block
 from jedi.evaluate.cache import evaluator_method_cache
 from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \
@@ -47,13 +48,12 @@ _numpy_doc_string_cache = None

 def _get_numpy_doc_string_cls():
     global _numpy_doc_string_cache
-    if isinstance(_numpy_doc_string_cache, ImportError):
+    if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
         raise _numpy_doc_string_cache
     try:
         from numpydoc.docscrape import NumpyDocString
         _numpy_doc_string_cache = NumpyDocString
-    except ImportError as e:
-        _numpy_doc_string_cache = e
+    except (ImportError, SyntaxError) as e:
+        # Remember the error, so the check above can re-raise quickly on
+        # the next call instead of re-importing numpydoc.
+        _numpy_doc_string_cache = e
         raise
     return _numpy_doc_string_cache

@@ -64,11 +64,11 @@ def _search_param_in_numpydocstr(docstr, param_str):
         # This is a non-public API. If it ever changes we should be
         # prepared and return gracefully.
         params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
-    except (KeyError, AttributeError, ImportError):
+    except (KeyError, AttributeError, ImportError, SyntaxError):
         return []
     for p_name, p_type, p_descr in params:
         if p_name == param_str:
-            m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
+            m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
             if m:
                 p_type = m.group(1)
             return list(_expand_typestr(p_type))
@@ -103,11 +103,11 @@ def _expand_typestr(type_str):
     Attempts to interpret the possible types in `type_str`
     """
     # Check if alternative types are specified with 'or'
-    if re.search('\\bor\\b', type_str):
+    if re.search(r'\bor\b', type_str):
         for t in type_str.split('or'):
             yield t.split('of')[0].strip()
     # Check if like "list of `type`" and set type to list
-    elif re.search('\\bof\\b', type_str):
+    elif re.search(r'\bof\b', type_str):
         yield type_str.split('of')[0]
     # Check if type is a set of valid literal values eg: {'C', 'F', 'A'}
     elif type_str.startswith('{'):
@@ -194,7 +194,7 @@ def _evaluate_for_statement_string(module_context, string):
     if string is None:
         return []

-    for element in re.findall('((?:\w+\.)*\w+)\.', string):
+    for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
         # Try to import module part in dotted name.
         # (e.g., 'threading' in 'threading.Thread').
         string = 'import %s\n' % element + string

     # Take the default grammar here, if we load the Python 2.7 grammar here, it
     # will be impossible to use `...` (Ellipsis) as a token. Docstring types
     # don't need to conform with the current grammar.
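+    # At this point `string` is a type expression pulled out of a docstring,
+    # e.g. ``list`` after _expand_typestr() reduced a numpydoc entry such as
+    # ``arr : list of int, optional`` (an illustrative example).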
+    debug.dbg('Parse docstring code %s', string, color='BLUE')
     grammar = module_context.evaluator.latest_grammar
     try:
         module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
@@ -261,15 +262,16 @@ def _execute_array_values(evaluator, array):
             values.append(LazyKnownContexts(objects))
         return {FakeSequence(evaluator, array.array_type, values)}
     else:
-        return array.execute_evaluated()
+        return array.execute_annotation()


 @evaluator_method_cache()
 def infer_param(execution_context, param):
-    from jedi.evaluate.context.instance import AnonymousInstanceFunctionExecution
+    from jedi.evaluate.context.instance import InstanceArguments
+    from jedi.evaluate.context import FunctionExecutionContext

     def eval_docstring(docstring):
-        return ContextSet.from_iterable(
+        return ContextSet(
             p
             for param_str in _search_param_in_docstr(docstring, param.name.value)
             for p in _evaluate_for_statement_string(module_context, param_str)
@@ -280,11 +282,13 @@ def infer_param(execution_context, param):
         return NO_CONTEXTS

     types = eval_docstring(execution_context.py__doc__())
-    if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \
-            execution_context.function_context.name.string_name == '__init__':
-        class_context = execution_context.instance.class_context
+    if isinstance(execution_context, FunctionExecutionContext) \
+            and isinstance(execution_context.var_args, InstanceArguments) \
+            and execution_context.function_context.py__name__() == '__init__':
+        class_context = execution_context.var_args.instance.class_context
         types |= eval_docstring(class_context.py__doc__())

+    debug.dbg('Found param types for docstring: %s', types, color='BLUE')
     return types
diff --git a/jedi/evaluate/dynamic.py b/jedi/evaluate/dynamic.py
index 9e8d5714..6b10014c 100644
--- a/jedi/evaluate/dynamic.py
+++ b/jedi/evaluate/dynamic.py
@@ -28,22 +28,30 @@ from jedi.evaluate.helpers import is_stdlib_path
 from jedi.evaluate.utils import to_list
 from jedi.parser_utils import get_parent_scope
 from jedi.evaluate.context import ModuleContext, instance
-from jedi.evaluate.base_context import ContextSet
-
+from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
+from jedi.evaluate import recursion

 MAX_PARAM_SEARCHES = 20


-class MergedExecutedParams(object):
+class DynamicExecutedParams(object):
     """
     Simulates being a parameter while actually just being multiple params.
     """
+
-    def __init__(self, executed_params):
+    def __init__(self, evaluator, executed_params):
+        self.evaluator = evaluator
         self._executed_params = executed_params

     def infer(self):
-        return ContextSet.from_sets(p.infer() for p in self._executed_params)
+        with recursion.execution_allowed(self.evaluator, self) as allowed:
+            # We need to catch recursions that may occur, because
+            # anonymous functions can create anonymous parameters that are
+            # more or less self-referencing.
+            if allowed:
+                return ContextSet.from_sets(p.infer() for p in self._executed_params)
+            return NO_CONTEXTS


 @debug.increase_indent
@@ -91,10 +99,10 @@ def search_params(evaluator, execution_context, funcdef):
         )
         if function_executions:
             zipped_params = zip(*list(
-                function_execution.get_params()
+                function_execution.get_executed_params_and_issues()[0]
                 for function_execution in function_executions
             ))
-            params = [MergedExecutedParams(executed_params) for executed_params in zipped_params]
+            params = [DynamicExecutedParams(evaluator, executed_params) for executed_params in zipped_params]
             # Evaluate the ExecutedParams to types.
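+            # Illustrative example of what this dynamic search achieves:
+            #
+            #     def foo(bar):
+            #         bar.  # <- completion here
+            #     foo(1)
+            #
+            # The call ``foo(1)`` found above lets ``bar`` be inferred as an
+            # int, even though the parameter itself is unannotated.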
else: return create_default_params(execution_context, funcdef) @@ -114,7 +122,7 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name) compare_node = funcdef if string_name == '__init__': cls = get_parent_scope(funcdef) - if isinstance(cls, tree.Class): + if cls.type == 'classdef': string_name = cls.name.value compare_node = cls @@ -200,7 +208,7 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer): # Here we're trying to find decorators by checking the first # parameter. It's not very generic though. Should find a better # solution that also applies to nested decorators. - params = value.parent_context.get_params() + params, _ = value.parent_context.get_executed_params_and_issues() if len(params) != 1: continue values = params[0].infer() diff --git a/jedi/evaluate/filters.py b/jedi/evaluate/filters.py index 4294f2a6..2f431650 100644 --- a/jedi/evaluate/filters.py +++ b/jedi/evaluate/filters.py @@ -6,158 +6,13 @@ from abc import abstractmethod from parso.tree import search_ancestor -from jedi._compatibility import use_metaclass, Parameter -from jedi.cache import memoize_method +from jedi._compatibility import use_metaclass from jedi.evaluate import flow_analysis from jedi.evaluate.base_context import ContextSet, Context from jedi.parser_utils import get_parent_scope from jedi.evaluate.utils import to_list - - -class AbstractNameDefinition(object): - start_pos = None - string_name = None - parent_context = None - tree_name = None - - @abstractmethod - def infer(self): - raise NotImplementedError - - @abstractmethod - def goto(self): - # Typically names are already definitions and therefore a goto on that - # name will always result on itself. - return {self} - - def get_root_context(self): - return self.parent_context.get_root_context() - - def __repr__(self): - if self.start_pos is None: - return '<%s: %s>' % (self.__class__.__name__, self.string_name) - return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos) - - def execute(self, arguments): - return self.infer().execute(arguments) - - def execute_evaluated(self, *args, **kwargs): - return self.infer().execute_evaluated(*args, **kwargs) - - def is_import(self): - return False - - @property - def api_type(self): - return self.parent_context.api_type - - -class AbstractTreeName(AbstractNameDefinition): - def __init__(self, parent_context, tree_name): - self.parent_context = parent_context - self.tree_name = tree_name - - def goto(self): - return self.parent_context.evaluator.goto(self.parent_context, self.tree_name) - - def is_import(self): - imp = search_ancestor(self.tree_name, 'import_from', 'import_name') - return imp is not None - - @property - def string_name(self): - return self.tree_name.value - - @property - def start_pos(self): - return self.tree_name.start_pos - - -class ContextNameMixin(object): - def infer(self): - return ContextSet(self._context) - - def get_root_context(self): - if self.parent_context is None: - return self._context - return super(ContextNameMixin, self).get_root_context() - - @property - def api_type(self): - return self._context.api_type - - -class ContextName(ContextNameMixin, AbstractTreeName): - def __init__(self, context, tree_name): - super(ContextName, self).__init__(context.parent_context, tree_name) - self._context = context - - -class TreeNameDefinition(AbstractTreeName): - _API_TYPES = dict( - import_name='module', - import_from='module', - funcdef='function', - param='param', - classdef='class', - ) - - def 
infer(self):
-        # Refactor this, should probably be here.
-        from jedi.evaluate.syntax_tree import tree_name_to_contexts
-        return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name)
-
-    @property
-    def api_type(self):
-        definition = self.tree_name.get_definition(import_name_always=True)
-        if definition is None:
-            return 'statement'
-        return self._API_TYPES.get(definition.type, 'statement')
-
-
-class ParamName(AbstractTreeName):
-    api_type = u'param'
-
-    def __init__(self, parent_context, tree_name):
-        self.parent_context = parent_context
-        self.tree_name = tree_name
-
-    def get_kind(self):
-        tree_param = search_ancestor(self.tree_name, 'param')
-        if tree_param.star_count == 1:  # *args
-            return Parameter.VAR_POSITIONAL
-        if tree_param.star_count == 2:  # **kwargs
-            return Parameter.VAR_KEYWORD
-
-        parent = tree_param.parent
-        for p in parent.children:
-            if p.type == 'param':
-                if p.star_count:
-                    return Parameter.KEYWORD_ONLY
-                if p == tree_param:
-                    break
-        return Parameter.POSITIONAL_OR_KEYWORD
-
-    def infer(self):
-        return self.get_param().infer()
-
-    def get_param(self):
-        params = self.parent_context.get_params()
-        param_node = search_ancestor(self.tree_name, 'param')
-        return params[param_node.position_index]
-
-
-class AnonymousInstanceParamName(ParamName):
-    def infer(self):
-        param_node = search_ancestor(self.tree_name, 'param')
-        # TODO I think this should not belong here. It's not even really true,
-        # because classmethod and other descriptors can change it.
-        if param_node.position_index == 0:
-            # This is a speed optimization, to return the self param (because
-            # it's known). This only affects anonymous instances.
-            return ContextSet(self.parent_context.instance)
-        else:
-            return self.get_param().infer()
+from jedi.evaluate.cache import evaluator_function_cache
+from jedi.evaluate.names import TreeNameDefinition, ParamName, AbstractNameDefinition


 class AbstractFilter(object):
@@ -177,34 +32,67 @@ class AbstractFilter(object):
         raise NotImplementedError


+class FilterWrapper(object):
+    name_wrapper_class = None
+
+    def __init__(self, wrapped_filter):
+        self._wrapped_filter = wrapped_filter
+
+    def wrap_names(self, names):
+        return [self.name_wrapper_class(name) for name in names]
+
+    def get(self, name):
+        return self.wrap_names(self._wrapped_filter.get(name))
+
+    def values(self):
+        return self.wrap_names(self._wrapped_filter.values())
+
+
+@evaluator_function_cache()
+def _get_definition_names(evaluator, module_node, name_key):
+    try:
+        names = module_node.get_used_names()[name_key]
+    except KeyError:
+        return []
+    return [name for name in names if name.is_definition()]
+
+
 class AbstractUsedNamesFilter(AbstractFilter):
     name_class = TreeNameDefinition

     def __init__(self, context, parser_scope):
         self._parser_scope = parser_scope
-        self._used_names = self._parser_scope.get_root_node().get_used_names()
+        self._module_node = self._parser_scope.get_root_node()
+        self._used_names = self._module_node.get_used_names()
         self.context = context

     def get(self, name):
-        try:
-            names = self._used_names[name]
-        except KeyError:
-            return []
-
-        return self._convert_names(self._filter(names))
+        return self._convert_names(self._filter(
+            _get_definition_names(self.context.evaluator, self._module_node, name)
+        ))

     def _convert_names(self, names):
         return [self.name_class(self.context, name) for name in names]

     def values(self):
-        return
self._convert_names(name for name_list in self._used_names.values() - for name in self._filter(name_list)) + evaluator = self.context.evaluator + module_node = self._module_node + return self._convert_names( + name + for name_key in self._used_names + for name in self._filter( + _get_definition_names(evaluator, module_node, name_key) + ) + ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.context) class ParserTreeFilter(AbstractUsedNamesFilter): + # TODO remove evaluator as an argument, it's not used. def __init__(self, evaluator, context, node_context=None, until_position=None, origin_scope=None): """ @@ -226,8 +114,6 @@ class ParserTreeFilter(AbstractUsedNamesFilter): return list(self._check_flows(names)) def _is_name_reachable(self, name): - if not name.is_definition(): - return False parent = name.parent if parent.type == 'trailer': return False @@ -272,20 +158,29 @@ class FunctionExecutionFilter(ParserTreeFilter): yield TreeNameDefinition(self.context, name) -class AnonymousInstanceFunctionExecutionFilter(FunctionExecutionFilter): - param_name = AnonymousInstanceParamName - - class GlobalNameFilter(AbstractUsedNamesFilter): def __init__(self, context, parser_scope): super(GlobalNameFilter, self).__init__(context, parser_scope) + def get(self, name): + try: + names = self._used_names[name] + except KeyError: + return [] + return self._convert_names(self._filter(names)) + @to_list def _filter(self, names): for name in names: if name.parent.type == 'global_stmt': yield name + def values(self): + return self._convert_names( + name for name_list in self._used_names.values() + for name in self._filter(name_list) + ) + class DictFilter(AbstractFilter): def __init__(self, dct): @@ -338,7 +233,7 @@ class _BuiltinMappedMethod(Context): self._method = method self._builtin_func = builtin_func - def py__call__(self, params): + def py__call__(self, arguments): # TODO add TypeError if params are given/or not correct. return self._method(self.parent_context) @@ -376,9 +271,9 @@ class SpecialMethodFilter(DictFilter): else: continue break - return ContextSet( + return ContextSet([ _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func) - ) + ]) def __init__(self, context, dct, builtin_context): super(SpecialMethodFilter, self).__init__(dct) @@ -418,7 +313,7 @@ class AbstractObjectOverwrite(use_metaclass(_OverwriteMeta, object)): def get_object(self): raise NotImplementedError - def get_filters(self, search_global, *args, **kwargs): + def get_filters(self, search_global=False, *args, **kwargs): yield SpecialMethodFilter(self, self.overwritten_methods, self.get_object()) for filter in self.get_object().get_filters(search_global): @@ -426,17 +321,9 @@ class AbstractObjectOverwrite(use_metaclass(_OverwriteMeta, object)): class BuiltinOverwrite(Context, AbstractObjectOverwrite): - special_object_identifier = None - def __init__(self, evaluator): super(BuiltinOverwrite, self).__init__(evaluator, evaluator.builtins_module) - @memoize_method - def get_object(self): - from jedi.evaluate import compiled - assert self.special_object_identifier - return compiled.get_special_object(self.evaluator, self.special_object_identifier) - def py__class__(self): return self.get_object().py__class__() @@ -493,8 +380,8 @@ def get_global_filters(evaluator, context, until_position, origin_scope): Finally, it yields the builtin filter, if `include_builtin` is true (default). - >>> filters[3].values() #doctest: +ELLIPSIS - [, ...] + >>> list(filters[3].values()) #doctest: +ELLIPSIS + [...] 
""" from jedi.evaluate.context.function import FunctionExecutionContext while context is not None: @@ -511,5 +398,5 @@ def get_global_filters(evaluator, context, until_position, origin_scope): context = context.parent_context # Add builtins to the global scope. - for filter in evaluator.builtins_module.get_filters(search_global=True): + for filter in evaluator.builtins_module.get_filters(): yield filter diff --git a/jedi/evaluate/finder.py b/jedi/evaluate/finder.py index 5e7043f7..4db5bbaf 100644 --- a/jedi/evaluate/finder.py +++ b/jedi/evaluate/finder.py @@ -19,16 +19,17 @@ from parso.python import tree from parso.tree import search_ancestor from jedi import debug from jedi import settings -from jedi.evaluate.context import AbstractInstanceContext from jedi.evaluate import compiled from jedi.evaluate import analysis from jedi.evaluate import flow_analysis from jedi.evaluate.arguments import TreeArguments from jedi.evaluate import helpers from jedi.evaluate.context import iterable -from jedi.evaluate.filters import get_global_filters, TreeNameDefinition -from jedi.evaluate.base_context import ContextSet +from jedi.evaluate.filters import get_global_filters +from jedi.evaluate.names import TreeNameDefinition +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS from jedi.parser_utils import is_scope, get_parent_scope +from jedi.evaluate.gradual.conversion import stub_to_actual_context_set class NameFinder(object): @@ -61,7 +62,7 @@ class NameFinder(object): node=self._name, ) if check is flow_analysis.UNREACHABLE: - return ContextSet() + return NO_CONTEXTS return self._found_predefined_types types = self._names_to_types(names, attribute_lookup) @@ -110,13 +111,23 @@ class NameFinder(object): ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef') if ancestor is not None: colon = ancestor.children[-2] - if position < colon.start_pos: + if position is not None and position < colon.start_pos: if lambdef is None or position < lambdef.children[-2].start_pos: position = ancestor.start_pos return get_global_filters(self._evaluator, self._context, position, origin_scope) else: - return self._context.get_filters(search_global, self._position, origin_scope=origin_scope) + return self._get_context_filters(origin_scope) + + def _get_context_filters(self, origin_scope): + for f in self._context.get_filters(False, self._position, origin_scope=origin_scope): + yield f + # This covers the case where a stub files are incomplete. + if self._context.is_stub(): + contexts = stub_to_actual_context_set(self._context, ignore_compiled=True) + for c in contexts: + for f in c.get_filters(): + yield f def filter_name(self, filters): """ @@ -178,16 +189,23 @@ class NameFinder(object): contexts = ContextSet.from_sets(name.infer() for name in names) debug.dbg('finder._names_to_types: %s -> %s', names, contexts) - if not names and isinstance(self._context, AbstractInstanceContext): + if not names and self._context.is_instance(): # handling __getattr__ / __getattribute__ return self._check_getattr(self._context) # Add isinstance and other if/assert knowledge. 
        if not contexts and isinstance(self._name, tree.Name) and \
-                not isinstance(self._name_context, AbstractInstanceContext):
+                not self._name_context.is_instance():
             flow_scope = self._name
-            base_node = self._name_context.tree_node
-            if base_node.type == 'comp_for':
+            base_nodes = [self._name_context.tree_node]
+            try:
+                stub_node = self._name_context.stub_context.tree_node
+            except AttributeError:
+                pass
+            else:
+                base_nodes.append(stub_node)
+
+            if any(b.type == 'comp_for' for b in base_nodes):
                 return contexts
             while True:
                 flow_scope = get_parent_scope(flow_scope, include_flows=True)
@@ -195,7 +213,7 @@
                                             self._name, self._position)
                 if n is not None:
                     return n
-                if flow_scope == base_node:
+                if flow_scope in base_nodes:
                     break
         return contexts

@@ -267,12 +285,11 @@ def _check_isinstance_type(context, element, search_name):
     except AssertionError:
         return None

-    context_set = ContextSet()
+    context_set = NO_CONTEXTS
     for cls_or_tup in lazy_context_cls.infer():
         if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
             for lazy_context in cls_or_tup.py__iter__():
-                for context in lazy_context.infer():
-                    context_set |= context.execute_evaluated()
+                context_set |= lazy_context.infer().execute_evaluated()
         else:
-            context_set |= cls_or_tup.execute_evaluated()
+            context_set |= helpers.execute_evaluated(cls_or_tup)
     return context_set
diff --git a/jedi/evaluate/gradual/__init__.py b/jedi/evaluate/gradual/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/jedi/evaluate/gradual/annotation.py b/jedi/evaluate/gradual/annotation.py
new file mode 100644
index 00000000..d6ae9eb9
--- /dev/null
+++ b/jedi/evaluate/gradual/annotation.py
@@ -0,0 +1,384 @@
+"""
+PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
+through function annotations. The document strongly suggests that only the
+kind of type hinting defined in PEP 484 should be allowed as annotations in
+future Python versions.
+"""
+
+import re
+
+from parso import ParserSyntaxError, parse
+
+from jedi._compatibility import force_unicode
+from jedi.evaluate.cache import evaluator_method_cache
+from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
+from jedi.evaluate.gradual.typing import TypeVar, AnnotatedClass, \
+    AbstractAnnotatedClass
+from jedi.evaluate.helpers import is_string
+from jedi import debug
+from jedi import parser_utils
+
+
+def eval_annotation(context, annotation):
+    """
+    Evaluates an annotation node. This means that it evaluates the part of
This means that it evaluates the part of + `int` here: + + foo: int = 3 + + Also checks for forward references (strings) + """ + context_set = context.eval_node(annotation) + if len(context_set) != 1: + debug.warning("Eval'ed typing index %s should lead to 1 object, " + " not %s" % (annotation, context_set)) + return context_set + + evaled_context = list(context_set)[0] + if is_string(evaled_context): + result = _get_forward_reference_node(context, evaled_context.get_safe_value()) + if result is not None: + return context.eval_node(result) + return context_set + + +def _evaluate_annotation_string(context, string, index=None): + node = _get_forward_reference_node(context, string) + if node is None: + return NO_CONTEXTS + + context_set = context.eval_node(node) + if index is not None: + context_set = context_set.filter( + lambda context: context.array_type == u'tuple' # noqa + and len(list(context.py__iter__())) >= index + ).py__simple_getitem__(index) + return context_set + + +def _get_forward_reference_node(context, string): + try: + new_node = context.evaluator.grammar.parse( + force_unicode(string), + start_symbol='eval_input', + error_recovery=False + ) + except ParserSyntaxError: + debug.warning('Annotation not parsed: %s' % string) + return None + else: + module = context.tree_node.get_root_node() + parser_utils.move(new_node, module.end_pos[0]) + new_node.parent = context.tree_node + return new_node + + +def _split_comment_param_declaration(decl_text): + """ + Split decl_text on commas, but group generic expressions + together. + + For example, given "foo, Bar[baz, biz]" we return + ['foo', 'Bar[baz, biz]']. + + """ + try: + node = parse(decl_text, error_recovery=False).children[0] + except ParserSyntaxError: + debug.warning('Comment annotation is not valid Python: %s' % decl_text) + return [] + + if node.type == 'name': + return [node.get_code().strip()] + + params = [] + try: + children = node.children + except AttributeError: + return [] + else: + for child in children: + if child.type in ['name', 'atom_expr', 'power']: + params.append(child.get_code().strip()) + + return params + + +@evaluator_method_cache() +def infer_param(execution_context, param): + """ + Infers the type of a function parameter, using type annotations. + """ + annotation = param.annotation + if annotation is None: + # If no Python 3-style annotation, look for a Python 2-style comment + # annotation. + # Identify parameters to function in the same sequence as they would + # appear in a type comment. + all_params = [child for child in param.parent.children + if child.type == 'param'] + + node = param.parent.parent + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_CONTEXTS + + match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment) + if not match: + return NO_CONTEXTS + params_comments = _split_comment_param_declaration(match.group(1)) + + # Find the specific param being investigated + index = all_params.index(param) + # If the number of parameters doesn't match length of type comment, + # ignore first parameter (assume it's self). 
+ if len(params_comments) != len(all_params): + debug.warning( + "Comments length != Params length %s %s", + params_comments, all_params + ) + from jedi.evaluate.context.instance import InstanceArguments + if isinstance(execution_context.var_args, InstanceArguments): + if index == 0: + # Assume it's self, which is already handled + return NO_CONTEXTS + index -= 1 + if index >= len(params_comments): + return NO_CONTEXTS + + param_comment = params_comments[index] + return _evaluate_annotation_string( + execution_context.function_context.get_default_param_context(), + param_comment + ) + # Annotations are like default params and resolve in the same way. + context = execution_context.function_context.get_default_param_context() + return eval_annotation(context, annotation) + + +def py__annotations__(funcdef): + dct = {} + for function_param in funcdef.get_params(): + param_annotation = function_param.annotation + if param_annotation is not None: + dct[function_param.name.value] = param_annotation + + return_annotation = funcdef.annotation + if return_annotation: + dct['return'] = return_annotation + return dct + + +@evaluator_method_cache() +def infer_return_types(function_execution_context): + """ + Infers the type of a function's return value, + according to type annotations. + """ + all_annotations = py__annotations__(function_execution_context.tree_node) + annotation = all_annotations.get("return", None) + if annotation is None: + # If there is no Python 3-type annotation, look for a Python 2-type annotation + node = function_execution_context.tree_node + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_CONTEXTS + + match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment) + if not match: + return NO_CONTEXTS + + return _evaluate_annotation_string( + function_execution_context.function_context.get_default_param_context(), + match.group(1).strip() + ).execute_annotation() + if annotation is None: + return NO_CONTEXTS + + context = function_execution_context.function_context.get_default_param_context() + unknown_type_vars = list(find_unknown_type_vars(context, annotation)) + annotation_contexts = eval_annotation(context, annotation) + if not unknown_type_vars: + return annotation_contexts.execute_annotation() + + type_var_dict = infer_type_vars_for_execution(function_execution_context, all_annotations) + + return ContextSet( + ann.define_generics(type_var_dict) + if isinstance(ann, AbstractAnnotatedClass) else ann + for ann in annotation_contexts + ).execute_annotation() + + +def infer_type_vars_for_execution(execution_context, annotation_dict): + """ + Some functions use type vars that are not defined by the class, but rather + only defined in the function. See for example `iter`. In those cases we + want to: + + 1. Search for undefined type vars. + 2. Infer type vars with the execution state we have. + 3. Return the union of all type vars that have been found. 
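+
+    For example (illustrative)::
+
+        def first(i: Iterable[_T]) -> _T: ...
+        first([1, 2])  # infers _T to be int from the argument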
+ """ + context = execution_context.function_context.get_default_param_context() + + annotation_variable_results = {} + executed_params, _ = execution_context.get_executed_params_and_issues() + for executed_param in executed_params: + try: + annotation_node = annotation_dict[executed_param.string_name] + except KeyError: + continue + + annotation_variables = find_unknown_type_vars(context, annotation_node) + if annotation_variables: + # Infer unknown type var + annotation_context_set = context.eval_node(annotation_node) + star_count = executed_param._param_node.star_count + actual_context_set = executed_param.infer(use_hints=False) + if star_count == 1: + actual_context_set = actual_context_set.merge_types_of_iterate() + elif star_count == 2: + # TODO _dict_values is not public. + actual_context_set = actual_context_set.try_merge('_dict_values') + for ann in annotation_context_set: + _merge_type_var_dicts( + annotation_variable_results, + _infer_type_vars(ann, actual_context_set), + ) + + return annotation_variable_results + + +def _merge_type_var_dicts(base_dict, new_dict): + for type_var_name, contexts in new_dict.items(): + try: + base_dict[type_var_name] |= contexts + except KeyError: + base_dict[type_var_name] = contexts + + +def _infer_type_vars(annotation_context, context_set): + """ + This function tries to find information about undefined type vars and + returns a dict from type var name to context set. + + This is for example important to understand what `iter([1])` returns. + According to typeshed, `iter` returns an `Iterator[_T]`: + + def iter(iterable: Iterable[_T]) -> Iterator[_T]: ... + + This functions would generate `int` for `_T` in this case, because it + unpacks the `Iterable`. + """ + type_var_dict = {} + if isinstance(annotation_context, TypeVar): + return {annotation_context.py__name__(): context_set.py__class__()} + elif isinstance(annotation_context, AnnotatedClass): + name = annotation_context.py__name__() + if name == 'Iterable': + given = annotation_context.get_given_types() + if given: + for nested_annotation_context in given[0]: + _merge_type_var_dicts( + type_var_dict, + _infer_type_vars( + nested_annotation_context, + context_set.merge_types_of_iterate() + ) + ) + elif name == 'Mapping': + given = annotation_context.get_given_types() + if len(given) == 2: + for context in context_set: + try: + method = context.get_mapping_item_contexts + except AttributeError: + continue + key_contexts, value_contexts = method() + + for nested_annotation_context in given[0]: + _merge_type_var_dicts( + type_var_dict, + _infer_type_vars( + nested_annotation_context, + key_contexts, + ) + ) + for nested_annotation_context in given[1]: + _merge_type_var_dicts( + type_var_dict, + _infer_type_vars( + nested_annotation_context, + value_contexts, + ) + ) + return type_var_dict + + +def find_type_from_comment_hint_for(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[1], name) + + +def find_type_from_comment_hint_with(context, node, name): + assert len(node.children[1].children) == 3, \ + "Can only be here when children[1] is 'foo() as f'" + varlist = node.children[1].children[2] + return _find_type_from_comment_hint(context, node, varlist, name) + + +def find_type_from_comment_hint_assign(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[0], name) + + +def _find_type_from_comment_hint(context, node, varlist, name): + index = None + if varlist.type in ("testlist_star_expr", "exprlist", "testlist"): + # 
something like "a, b = 1, 2" + index = 0 + for child in varlist.children: + if child == name: + break + if child.type == "operator": + continue + index += 1 + else: + return [] + + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return [] + match = re.match(r"^#\s*type:\s*([^#]*)", comment) + if match is None: + return [] + return _evaluate_annotation_string( + context, match.group(1).strip(), index + ).execute_annotation() + + +def find_unknown_type_vars(context, node): + def check_node(node): + if node.type in ('atom_expr', 'power'): + trailer = node.children[-1] + if trailer.type == 'trailer' and trailer.children[0] == '[': + for subscript_node in _unpack_subscriptlist(trailer.children[1]): + check_node(subscript_node) + else: + type_var_set = context.eval_node(node) + for type_var in type_var_set: + if isinstance(type_var, TypeVar) and type_var not in found: + found.append(type_var) + + found = [] # We're not using a set, because the order matters. + check_node(node) + return found + + +def _unpack_subscriptlist(subscriptlist): + if subscriptlist.type == 'subscriptlist': + for subscript in subscriptlist.children[::2]: + if subscript.type != 'subscript': + yield subscript + else: + if subscriptlist.type != 'subscript': + yield subscriptlist diff --git a/jedi/evaluate/gradual/conversion.py b/jedi/evaluate/gradual/conversion.py new file mode 100644 index 00000000..09277b70 --- /dev/null +++ b/jedi/evaluate/gradual/conversion.py @@ -0,0 +1,120 @@ +from jedi.evaluate.base_context import ContextSet, \ + NO_CONTEXTS +from jedi.evaluate.utils import to_list +from jedi.evaluate.gradual.stub_context import StubModuleContext + + +def stub_to_actual_context_set(stub_context, ignore_compiled=False): + stub_module = stub_context.get_root_context() + if not stub_module.is_stub(): + return ContextSet([stub_context]) + + was_instance = stub_context.is_instance() + if was_instance: + stub_context = stub_context.py__class__() + + qualified_names = stub_context.get_qualified_names() + contexts = _infer_from_stub(stub_module, qualified_names, ignore_compiled) + if was_instance: + contexts = ContextSet.from_sets( + c.execute_evaluated() + for c in contexts + if c.is_class() + ) + return contexts + + +def _infer_from_stub(stub_module, qualified_names, ignore_compiled): + if qualified_names is None: + return NO_CONTEXTS + + assert isinstance(stub_module, StubModuleContext), stub_module + non_stubs = stub_module.non_stub_context_set + if ignore_compiled: + non_stubs = non_stubs.filter(lambda c: not c.is_compiled()) + for name in qualified_names: + non_stubs = non_stubs.py__getattribute__(name) + return non_stubs + + +def try_stubs_to_actual_context_set(stub_contexts, prefer_stub_to_compiled=False): + return ContextSet.from_sets( + stub_to_actual_context_set(stub_context, ignore_compiled=prefer_stub_to_compiled) + or ContextSet([stub_context]) + for stub_context in stub_contexts + ) + + +@to_list +def try_stub_to_actual_names(names, prefer_stub_to_compiled=False): + for name in names: + module = name.get_root_context() + if not module.is_stub(): + yield name + continue + + name_list = name.get_qualified_names() + if name_list is None: + contexts = NO_CONTEXTS + else: + contexts = _infer_from_stub( + module, + name_list[:-1], + ignore_compiled=prefer_stub_to_compiled, + ) + if contexts and name_list: + new_names = contexts.py__getattribute__(name_list[-1], is_goto=True) + for new_name in new_names: + yield new_name + if new_names: + continue + elif contexts: + for c in 
contexts: + yield c.name + continue + # This is the part where if we haven't found anything, just return the + # stub name. + yield name + + +def _load_stub_module(module): + if module.is_stub(): + return module + from jedi.evaluate.gradual.typeshed import _try_to_load_stub_cached + return _try_to_load_stub_cached( + module.evaluator, + import_names=module.string_names, + actual_context_set=ContextSet([module]), + parent_module_context=None, + sys_path=module.evaluator.get_sys_path(), + ) + + +def name_to_stub(name): + return ContextSet.from_sets(_to_stub(c) for c in name.infer()) + + +def _to_stub(context): + if context.is_stub(): + return ContextSet([context]) + + was_instance = context.is_instance() + if was_instance: + context = context.py__class__() + + qualified_names = context.get_qualified_names() + stub_module = _load_stub_module(context.get_root_context()) + if stub_module is None or qualified_names is None: + return NO_CONTEXTS + + stub_contexts = ContextSet([stub_module]) + for name in qualified_names: + stub_contexts = stub_contexts.py__getattribute__(name) + + if was_instance: + stub_contexts = ContextSet.from_sets( + c.execute_evaluated() + for c in stub_contexts + if c.is_class() + ) + return stub_contexts diff --git a/jedi/evaluate/gradual/stub_context.py b/jedi/evaluate/gradual/stub_context.py new file mode 100644 index 00000000..94090c11 --- /dev/null +++ b/jedi/evaluate/gradual/stub_context.py @@ -0,0 +1,105 @@ +from jedi.evaluate.base_context import ContextWrapper +from jedi.evaluate.context.module import ModuleContext +from jedi.evaluate.filters import ParserTreeFilter, \ + TreeNameDefinition +from jedi.evaluate.gradual.typing import TypingModuleFilterWrapper + + +class StubModuleContext(ModuleContext): + def __init__(self, non_stub_context_set, *args, **kwargs): + super(StubModuleContext, self).__init__(*args, **kwargs) + self.non_stub_context_set = non_stub_context_set + + def is_stub(self): + return True + + def sub_modules_dict(self): + """ + We have to overwrite this, because it's possible to have stubs that + don't have code for all the child modules. At the time of writing this + there are for example no stubs for `json.tool`. 
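+        Completing ``import json.`` should nevertheless offer ``tool``,
+        which is why the names of the non-stub modules are merged in below.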
+ """ + names = {} + for context in self.non_stub_context_set: + try: + method = context.sub_modules_dict + except AttributeError: + pass + else: + names.update(method()) + names.update(super(StubModuleContext, self).sub_modules_dict()) + return names + + def _get_first_non_stub_filters(self): + for context in self.non_stub_context_set: + yield next(context.get_filters(search_global=False)) + + def _get_stub_filters(self, search_global, **filter_kwargs): + return [StubFilter( + self.evaluator, + context=self, + search_global=search_global, + **filter_kwargs + )] + list(self.iter_star_filters(search_global=search_global)) + + def get_filters(self, search_global=False, until_position=None, + origin_scope=None, **kwargs): + filters = super(StubModuleContext, self).get_filters( + search_global, until_position, origin_scope, **kwargs + ) + next(filters) # Ignore the first filter and replace it with our own + stub_filters = self._get_stub_filters( + search_global=search_global, + until_position=until_position, + origin_scope=origin_scope, + ) + for f in stub_filters: + yield f + + for f in filters: + yield f + + +class TypingModuleWrapper(StubModuleContext): + def get_filters(self, *args, **kwargs): + filters = super(TypingModuleWrapper, self).get_filters(*args, **kwargs) + yield TypingModuleFilterWrapper(next(filters)) + for f in filters: + yield f + + +# From here on down we make looking up the sys.version_info fast. +class _StubName(TreeNameDefinition): + def infer(self): + inferred = super(_StubName, self).infer() + if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys': + return [VersionInfo(c) for c in inferred] + return inferred + + +class StubFilter(ParserTreeFilter): + name_class = _StubName + + def __init__(self, *args, **kwargs): + self._search_global = kwargs.pop('search_global') # Python 2 :/ + super(StubFilter, self).__init__(*args, **kwargs) + + def _is_name_reachable(self, name): + if not super(StubFilter, self)._is_name_reachable(name): + return False + + if not self._search_global: + # Imports in stub files are only public if they have an "as" + # export. + definition = name.get_definition() + if definition.type in ('import_from', 'import_name'): + if name.parent.type not in ('import_as_name', 'dotted_as_name'): + return False + n = name.value + if n.startswith('_') and not (n.startswith('__') and n.endswith('__')): + return False + return True + + +class VersionInfo(ContextWrapper): + pass diff --git a/jedi/evaluate/gradual/typeshed.py b/jedi/evaluate/gradual/typeshed.py new file mode 100644 index 00000000..f069c0c8 --- /dev/null +++ b/jedi/evaluate/gradual/typeshed.py @@ -0,0 +1,263 @@ +import os +import re + +from parso.file_io import FileIO +from jedi._compatibility import FileNotFoundError, cast_path +from jedi.parser_utils import get_cached_code_lines +from jedi.evaluate.cache import evaluator_function_cache +from jedi.evaluate.base_context import ContextSet +from jedi.evaluate.gradual.stub_context import TypingModuleWrapper, StubModuleContext + +_jedi_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +TYPESHED_PATH = os.path.join(_jedi_path, 'third_party', 'typeshed') + + +def _merge_create_stub_map(directories): + map_ = {} + for directory in directories: + map_.update(_create_stub_map(directory)) + return map_ + + +def _create_stub_map(directory): + """ + Create a mapping of an importable name in Python to a stub file. 
+ """ + def generate(): + try: + listed = os.listdir(directory) + except (FileNotFoundError, OSError): + # OSError is Python 2 + return + + for entry in listed: + entry = cast_path(entry) + path = os.path.join(directory, entry) + if os.path.isdir(path): + init = os.path.join(path, '__init__.pyi') + if os.path.isfile(init): + yield entry, init + elif entry.endswith('.pyi') and os.path.isfile(path): + name = entry.rstrip('.pyi') + if name != '__init__': + yield name, path + + # Create a dictionary from the tuple generator. + return dict(generate()) + + +def _get_typeshed_directories(version_info): + check_version_list = ['2and3', str(version_info.major)] + for base in ['stdlib', 'third_party']: + base = os.path.join(TYPESHED_PATH, base) + base_list = os.listdir(base) + for base_list_entry in base_list: + match = re.match(r'(\d+)\.(\d+)$', base_list_entry) + if match is not None: + if int(match.group(1)) == version_info.major \ + and int(match.group(2)) <= version_info.minor: + check_version_list.append(base_list_entry) + + for check_version in check_version_list: + yield os.path.join(base, check_version) + + +@evaluator_function_cache() +def _load_stub(evaluator, path): + return evaluator.parse(file_io=FileIO(path), cache=True, use_latest_grammar=True) + + +_version_cache = {} + + +def _cache_stub_file_map(version_info): + """ + Returns a map of an importable name in Python to a stub file. + """ + # TODO this caches the stub files indefinitely, maybe use a time cache + # for that? + version = version_info[:2] + try: + return _version_cache[version] + except KeyError: + pass + + _version_cache[version] = file_set = \ + _merge_create_stub_map(_get_typeshed_directories(version_info)) + return file_set + + +def import_module_decorator(func): + def wrapper(evaluator, import_names, parent_module_context, sys_path, prefer_stubs): + try: + actual_context_set = evaluator.module_cache.get(import_names) + except KeyError: + if parent_module_context is not None and parent_module_context.is_stub(): + parent_module_contexts = parent_module_context.non_stub_context_set + else: + parent_module_contexts = [parent_module_context] + if import_names == ('os', 'path'): + # This is a huge exception, we follow a nested import + # ``os.path``, because it's a very important one in Python + # that is being achieved by messing with ``sys.modules`` in + # ``os``. 
+ actual_parent = next(iter(parent_module_contexts)) + if actual_parent is None: + actual_parent, = evaluator.import_module(('os',), prefer_stubs=False) + actual_context_set = actual_parent.py__getattribute__('path') + else: + actual_context_set = ContextSet.from_sets( + func(evaluator, import_names, p, sys_path,) + for p in parent_module_contexts + ) + evaluator.module_cache.add(import_names, actual_context_set) + + if not prefer_stubs: + return actual_context_set + + stub = _try_to_load_stub_cached(evaluator, import_names, actual_context_set, + parent_module_context, sys_path) + if stub is not None: + return ContextSet([stub]) + return actual_context_set + + return wrapper + + +def _try_to_load_stub_cached(evaluator, import_names, *args, **kwargs): + try: + return evaluator.stub_module_cache[import_names] + except KeyError: + pass + + evaluator.stub_module_cache[import_names] = None + evaluator.stub_module_cache[import_names] = result = \ + _try_to_load_stub(evaluator, import_names, *args, **kwargs) + return result + + +def _try_to_load_stub(evaluator, import_names, actual_context_set, + parent_module_context, sys_path): + """ + Trying to load a stub for a set of import_names. + + This is modelled to work like "PEP 561 -- Distributing and Packaging Type + Information", see https://www.python.org/dev/peps/pep-0561. + """ + # 1. Try to load foo-stubs folders on path for import name foo. + if not parent_module_context: + # foo-stubs + for p in sys_path: + init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi' + m = _try_to_load_stub_from_file(evaluator, actual_context_set, init, import_names) + if m is not None: + return m + + # 2. Try to load pyi files next to py files. + for c in actual_context_set: + try: + method = c.py__file__ + except AttributeError: + pass + else: + file_path = method() + file_paths = [] + if c.is_namespace(): + file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()] + elif file_path is not None and file_path.endswith('.py'): + file_paths = [file_path + 'i'] + + for file_path in file_paths: + m = _try_to_load_stub_from_file( + evaluator, + actual_context_set, + # The file path should end with .pyi + file_path, + import_names + ) + if m is not None: + return m + + # 3. Try to load typeshed + m = _load_from_typeshed(evaluator, actual_context_set, parent_module_context, import_names) + if m is not None: + return m + + # 4. Try to load pyi file somewhere if actual_context_set was not defined. + if not actual_context_set: + if parent_module_context is not None: + try: + method = parent_module_context.py__path__ + except AttributeError: + check_path = [] + else: + check_path = method() + # In case import_names + names_for_path = (import_names[-1],) + else: + check_path = sys_path + names_for_path = import_names + + for p in check_path: + m = _try_to_load_stub_from_file( + evaluator, + actual_context_set, + os.path.join(p, *names_for_path) + '.pyi', + import_names, + ) + if m is not None: + return m + + # If no stub is found, that's fine, the calling function has to deal with + # it. + return None + + +def _load_from_typeshed(evaluator, actual_context_set, parent_module_context, import_names): + import_name = import_names[-1] + map_ = None + if len(import_names) == 1: + map_ = _cache_stub_file_map(evaluator.grammar.version_info) + elif isinstance(parent_module_context, StubModuleContext): + if not parent_module_context.is_package: + # Only if it's a package (= a folder) something can be + # imported. 
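+            # E.g. ``typing.pyi`` is a single file and cannot have
+            # submodules, while ``os/__init__.pyi`` allows ``os.path``
+            # to be found next to it (illustrative).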
+ return None + path = parent_module_context.py__path__() + map_ = _merge_create_stub_map(path) + + if map_ is not None: + path = map_.get(import_name) + if path is not None: + return _try_to_load_stub_from_file(evaluator, actual_context_set, path, import_names) + + +def _try_to_load_stub_from_file(evaluator, actual_context_set, path, import_names): + try: + stub_module_node = _load_stub(evaluator, path) + except (OSError, IOError): # IOError is Python 2 only + # The file that you're looking for doesn't exist (anymore). + return None + else: + return create_stub_module( + evaluator, actual_context_set, stub_module_node, path, + import_names + ) + + +def create_stub_module(evaluator, actual_context_set, stub_module_node, path, import_names): + if import_names == ('typing',): + module_cls = TypingModuleWrapper + else: + module_cls = StubModuleContext + file_name = os.path.basename(path) + stub_module_context = module_cls( + actual_context_set, evaluator, stub_module_node, + path=path, + string_names=import_names, + # The code was loaded with latest_grammar, so use + # that. + code_lines=get_cached_code_lines(evaluator.latest_grammar, path), + is_package=file_name == '__init__.pyi', + ) + return stub_module_context diff --git a/jedi/evaluate/gradual/typing.py b/jedi/evaluate/gradual/typing.py new file mode 100644 index 00000000..e7733578 --- /dev/null +++ b/jedi/evaluate/gradual/typing.py @@ -0,0 +1,674 @@ +""" +We need to somehow work with the typing objects. Since the typing objects are +pretty bare we need to add all the Jedi customizations to make them work as +contexts. + +This file deals with all the typing.py cases. +""" +from jedi._compatibility import unicode, force_unicode +from jedi import debug +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.compiled import builtin_from_name +from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \ + iterator_to_context_set, HelperContextMixin, ContextWrapper +from jedi.evaluate.lazy_context import LazyKnownContexts +from jedi.evaluate.context.iterable import SequenceLiteralContext +from jedi.evaluate.arguments import repack_with_argument_clinic +from jedi.evaluate.utils import to_list +from jedi.evaluate.filters import FilterWrapper +from jedi.evaluate.names import NameWrapper, AbstractTreeName, \ + AbstractNameDefinition, ContextName +from jedi.evaluate.helpers import is_string +from jedi.evaluate.context.klass import ClassMixin + +_PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split() +_TYPE_ALIAS_TYPES = { + 'List': 'builtins.list', + 'Dict': 'builtins.dict', + 'Set': 'builtins.set', + 'FrozenSet': 'builtins.frozenset', + 'ChainMap': 'collections.ChainMap', + 'Counter': 'collections.Counter', + 'DefaultDict': 'collections.defaultdict', + 'Deque': 'collections.deque', +} +_PROXY_TYPES = 'Optional Union ClassVar'.split() + + +class TypingName(AbstractTreeName): + def __init__(self, context, other_name): + super(TypingName, self).__init__(context.parent_context, other_name.tree_name) + self._context = context + + def infer(self): + return ContextSet([self._context]) + + +class _BaseTypingContext(Context): + def __init__(self, evaluator, parent_context, tree_name): + super(_BaseTypingContext, self).__init__(evaluator, parent_context) + self._tree_name = tree_name + + @property + def tree_node(self): + return self._tree_name + + def get_filters(self, *args, **kwargs): + # TODO this is obviously wrong. 
+ class EmptyFilter(): + def get(self, name): + return [] + + def values(self): + return [] + + yield EmptyFilter() + + def py__class__(self): + # TODO this is obviously not correct, but at least gives us a class if + # we have none. Some of these objects don't really have a base class in + # typeshed. + return builtin_from_name(self.evaluator, u'object') + + @property + def name(self): + return ContextName(self, self._tree_name) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._tree_name.value) + + +class TypingModuleName(NameWrapper): + def infer(self): + return ContextSet(self._remap()) + + def _remap(self): + name = self.string_name + evaluator = self.parent_context.evaluator + try: + actual = _TYPE_ALIAS_TYPES[name] + except KeyError: + pass + else: + yield TypeAlias.create_cached(evaluator, self.parent_context, self.tree_name, actual) + return + + if name in _PROXY_CLASS_TYPES: + yield TypingClassContext.create_cached(evaluator, self.parent_context, self.tree_name) + elif name in _PROXY_TYPES: + yield TypingContext.create_cached(evaluator, self.parent_context, self.tree_name) + elif name == 'runtime': + # We don't want anything here, not sure what this function is + # supposed to do, since it just appears in the stubs and shouldn't + # have any effects there (because it's never executed). + return + elif name == 'TypeVar': + yield TypeVarClass.create_cached(evaluator, self.parent_context, self.tree_name) + elif name == 'Any': + yield Any.create_cached(evaluator, self.parent_context, self.tree_name) + elif name == 'TYPE_CHECKING': + # This is needed for e.g. imports that are only available for type + # checking or are in cycles. The user can then check this variable. + yield builtin_from_name(evaluator, u'True') + elif name == 'overload': + yield OverloadFunction.create_cached(evaluator, self.parent_context, self.tree_name) + elif name == 'cast': + # TODO implement cast + for c in self._wrapped_name.infer(): # Fuck my life Python 2 + yield c + elif name == 'TypedDict': + # TODO doesn't even exist in typeshed/typing.py, yet. But will be + # added soon. + pass + elif name in ('no_type_check', 'no_type_check_decorator'): + # This is not necessary, as long as we are not doing type checking. + for c in self._wrapped_name.infer(): # Fuck my life Python 2 + yield c + else: + # Everything else shouldn't be relevant for type checking. + for c in self._wrapped_name.infer(): # Fuck my life Python 2 + yield c + + +class TypingModuleFilterWrapper(FilterWrapper): + name_wrapper_class = TypingModuleName + + +class _WithIndexBase(_BaseTypingContext): + def __init__(self, evaluator, parent_context, name, index_context, context_of_index): + super(_WithIndexBase, self).__init__(evaluator, parent_context, name) + self._index_context = index_context + self._context_of_index = context_of_index + + def __repr__(self): + return '<%s: %s[%s]>' % ( + self.__class__.__name__, + self._tree_name.value, + self._index_context, + ) + + +class TypingContextWithIndex(_WithIndexBase): + def execute_annotation(self): + string_name = self._tree_name.value + + if string_name == 'Union': + # This is kind of a special case, because we have Unions (in Jedi + # ContextSets). + return self.gather_annotation_classes().execute_annotation() + elif string_name == 'Optional': + # Optional is basically just saying it's either None or the actual + # type. 
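+            # I.e. Optional[int] behaves like Union[int, None], the
+            # equivalence PEP 484 defines (illustrative).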
+ return self.gather_annotation_classes().execute_annotation() \ + | ContextSet([builtin_from_name(self.evaluator, u'None')]) + elif string_name == 'Type': + # The type is actually already given in the index_context + return ContextSet([self._index_context]) + elif string_name == 'ClassVar': + # For now don't do anything here, ClassVars are always used. + return self._index_context.execute_annotation() + + cls = globals()[string_name] + return ContextSet([cls( + self.evaluator, + self.parent_context, + self._tree_name, + self._index_context, + self._context_of_index + )]) + + def gather_annotation_classes(self): + return ContextSet.from_sets( + _iter_over_arguments(self._index_context, self._context_of_index) + ) + + +class TypingContext(_BaseTypingContext): + index_class = TypingContextWithIndex + py__simple_getitem__ = None + + def py__getitem__(self, index_context_set, contextualized_node): + return ContextSet( + self.index_class.create_cached( + self.evaluator, + self.parent_context, + self._tree_name, + index_context, + context_of_index=contextualized_node.context) + for index_context in index_context_set + ) + + +class _TypingClassMixin(object): + def py__bases__(self): + return [LazyKnownContexts( + self.evaluator.builtins_module.py__getattribute__('object') + )] + + +class TypingClassContextWithIndex(_TypingClassMixin, TypingContextWithIndex, ClassMixin): + pass + + +class TypingClassContext(_TypingClassMixin, TypingContext, ClassMixin): + index_class = TypingClassContextWithIndex + + +def _iter_over_arguments(maybe_tuple_context, defining_context): + def iterate(): + if isinstance(maybe_tuple_context, SequenceLiteralContext): + for lazy_context in maybe_tuple_context.py__iter__(contextualized_node=None): + yield lazy_context.infer() + else: + yield ContextSet([maybe_tuple_context]) + + def resolve_forward_references(context_set): + for context in context_set: + if is_string(context): + from jedi.evaluate.gradual.annotation import _get_forward_reference_node + node = _get_forward_reference_node(defining_context, context.get_safe_value()) + if node is not None: + for c in defining_context.eval_node(node): + yield c + else: + yield context + + for context_set in iterate(): + yield ContextSet(resolve_forward_references(context_set)) + + +class TypeAlias(HelperContextMixin): + def __init__(self, evaluator, parent_context, origin_tree_name, actual): + self.evaluator = evaluator + self.parent_context = parent_context + self._origin_tree_name = origin_tree_name + self._actual = actual # e.g. builtins.list + + @property + def name(self): + return ContextName(self, self._origin_tree_name) + + def py__name__(self): + return self.name.string_name + + def __getattr__(self, name): + return getattr(self._get_type_alias_class(), name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._actual) + + @evaluator_method_cache() + def _get_type_alias_class(self): + module_name, class_name = self._actual.split('.') + if self.evaluator.environment.version_info.major == 2 and module_name == 'builtins': + module_name = '__builtin__' + + # TODO use evaluator.import_module? + from jedi.evaluate.imports import Importer + module, = Importer( + self.evaluator, [module_name], self.evaluator.builtins_module + ).follow() + classes = module.py__getattribute__(class_name) + # There should only be one, because it's code that we control. 
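+        # For example, the alias target ``'builtins.list'`` resolves to
+        # exactly one class: ``list``.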
+ assert len(classes) == 1, classes + cls = next(iter(classes)) + return cls + + +class _ContainerBase(_WithIndexBase): + def _get_getitem_contexts(self, index): + args = _iter_over_arguments(self._index_context, self._context_of_index) + for i, contexts in enumerate(args): + if i == index: + return contexts + + debug.warning('No param #%s found for annotation %s', index, self._index_context) + return NO_CONTEXTS + + +class Callable(_ContainerBase): + def py__call__(self, arguments): + # The 0th index are the arguments. + return self._get_getitem_contexts(1).execute_annotation() + + +class Tuple(_ContainerBase): + def _is_homogenous(self): + # To specify a variable-length tuple of homogeneous type, Tuple[T, ...] + # is used. + if isinstance(self._index_context, SequenceLiteralContext): + entries = self._index_context.get_tree_entries() + if len(entries) == 2 and entries[1] == '...': + return True + return False + + def py__simple_getitem__(self, index): + if self._is_homogenous(): + return self._get_getitem_contexts(0).execute_annotation() + else: + if isinstance(index, int): + return self._get_getitem_contexts(index).execute_annotation() + + debug.dbg('The getitem type on Tuple was %s' % index) + return NO_CONTEXTS + + def py__iter__(self, contextualized_node=None): + if self._is_homogenous(): + yield LazyKnownContexts(self._get_getitem_contexts(0).execute_annotation()) + else: + if isinstance(self._index_context, SequenceLiteralContext): + for i in range(self._index_context.py__len__()): + yield LazyKnownContexts(self._get_getitem_contexts(i).execute_annotation()) + + def py__getitem__(self, index_context_set, contextualized_node): + if self._is_homogenous(): + return self._get_getitem_contexts(0).execute_annotation() + + return ContextSet.from_sets( + _iter_over_arguments(self._index_context, self._context_of_index) + ).execute_annotation() + + +class Generic(_ContainerBase): + pass + + +class Protocol(_ContainerBase): + pass + + +class Any(_BaseTypingContext): + def execute_annotation(self): + debug.warning('Used Any - returned no results') + return NO_CONTEXTS + + +class TypeVarClass(_BaseTypingContext): + def py__call__(self, arguments): + unpacked = arguments.unpack() + + key, lazy_context = next(unpacked, (None, None)) + var_name = self._find_string_name(lazy_context) + # The name must be given, otherwise it's useless. 
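+        # For example, ``T = TypeVar('T')`` passes the name as the first
+        # positional argument; a keyword argument in its place is invalid.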
+        if var_name is None or key is not None:
+            debug.warning('Found a variable without a name %s', arguments)
+            return NO_CONTEXTS
+
+        return ContextSet([TypeVar.create_cached(
+            self.evaluator,
+            self.parent_context,
+            self._tree_name,
+            var_name,
+            unpacked
+        )])
+
+    def _find_string_name(self, lazy_context):
+        if lazy_context is None:
+            return None
+
+        context_set = lazy_context.infer()
+        if not context_set:
+            return None
+        if len(context_set) > 1:
+            debug.warning('Found multiple contexts for a type variable: %s', context_set)
+
+        name_context = next(iter(context_set))
+        try:
+            method = name_context.get_safe_value
+        except AttributeError:
+            return None
+        else:
+            safe_value = method(default=None)
+            if self.evaluator.environment.version_info.major == 2:
+                if isinstance(safe_value, bytes):
+                    return force_unicode(safe_value)
+            if isinstance(safe_value, (str, unicode)):
+                return safe_value
+            return None
+
+
+class TypeVar(_BaseTypingContext):
+    def __init__(self, evaluator, parent_context, tree_name, var_name, unpacked_args):
+        super(TypeVar, self).__init__(evaluator, parent_context, tree_name)
+        self._var_name = var_name
+
+        self._constraints_lazy_contexts = []
+        self._bound_lazy_context = None
+        self._covariant_lazy_context = None
+        self._contravariant_lazy_context = None
+        for key, lazy_context in unpacked_args:
+            if key is None:
+                self._constraints_lazy_contexts.append(lazy_context)
+            else:
+                if key == 'bound':
+                    self._bound_lazy_context = lazy_context
+                elif key == 'covariant':
+                    self._covariant_lazy_context = lazy_context
+                elif key == 'contravariant':
+                    self._contravariant_lazy_context = lazy_context
+                else:
+                    debug.warning('Invalid TypeVar param name %s', key)
+
+    def py__name__(self):
+        return self._var_name
+
+    def get_filters(self, *args, **kwargs):
+        return iter([])
+
+    def _get_classes(self):
+        if self._bound_lazy_context is not None:
+            return self._bound_lazy_context.infer()
+        if self._constraints_lazy_contexts:
+            return ContextSet.from_sets(
+                l.infer() for l in self._constraints_lazy_contexts
+            )
+        debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name)
+        return NO_CONTEXTS
+
+    def is_same_class(self, other):
+        # Everything can match an undefined type var.
+        return True
+
+    @property
+    def constraints(self):
+        return ContextSet.from_sets(
+            lazy.infer() for lazy in self._constraints_lazy_contexts
+        )
+
+    def execute_annotation(self):
+        return self._get_classes().execute_annotation()
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
+
+
+class OverloadFunction(_BaseTypingContext):
+    @repack_with_argument_clinic('func, /')
+    def py__call__(self, func_context_set):
+        # Just pass arguments through.
+        return func_context_set
+
+
+class BoundTypeVarName(AbstractNameDefinition):
+    """
+    This type var was bound to a certain type, e.g. int.
+    """
+    def __init__(self, type_var, context_set):
+        self._type_var = type_var
+        self.parent_context = type_var.parent_context
+        self._context_set = context_set
+
+    def infer(self):
+        def iter_():
+            for context in self._context_set:
+                # Replace Any with the constraints if they are there.
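+                # For example, a type var with constraints ``(str, bytes)``
+                # that was bound to ``Any`` yields ``str`` and ``bytes``.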
+ if isinstance(context, Any): + for constraint in self._type_var.constraints: + yield constraint + else: + yield context + return ContextSet(iter_()) + + def py__name__(self): + return self._type_var.py__name__() + + def __repr__(self): + return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._context_set) + + +class TypeVarFilter(object): + """ + A filter for all given variables in a class. + + A = TypeVar('A') + B = TypeVar('B') + class Foo(Mapping[A, B]): + ... + + In this example we would have two type vars given: A and B + """ + def __init__(self, given_types, type_vars): + self._given_types = given_types + self._type_vars = type_vars + + def get(self, name): + for i, type_var in enumerate(self._type_vars): + if type_var.py__name__() == name: + try: + return [BoundTypeVarName(type_var, self._given_types[i])] + except IndexError: + return [type_var.name] + return [] + + def values(self): + # The values are not relevant. If it's not searched exactly, the type + # vars are just global and should be looked up as that. + return [] + + +class AbstractAnnotatedClass(ClassMixin, ContextWrapper): + def get_type_var_filter(self): + return TypeVarFilter(self.get_given_types(), self.list_type_vars()) + + def get_filters(self, search_global=False, *args, **kwargs): + filters = super(AbstractAnnotatedClass, self).get_filters( + search_global, + *args, **kwargs + ) + for f in filters: + yield f + + if search_global: + # The type vars can only be looked up if it's a global search and + # not a direct lookup on the class. + yield self.get_type_var_filter() + + def is_same_class(self, other): + if not isinstance(other, AbstractAnnotatedClass): + return False + + if self.tree_node != other.tree_node: + # TODO not sure if this is nice. + return False + given_params1 = self.get_given_types() + given_params2 = other.get_given_types() + + if len(given_params1) != len(given_params2): + # If the amount of type vars doesn't match, the class doesn't + # match. + return False + + # Now compare generics + return all( + any( + # TODO why is this ordering the correct one? + cls2.is_same_class(cls1) + for cls1 in class_set1 + for cls2 in class_set2 + ) for class_set1, class_set2 in zip(given_params1, given_params2) + ) + + def py__call__(self, arguments): + instance, = super(AbstractAnnotatedClass, self).py__call__(arguments) + return ContextSet([InstanceWrapper(instance)]) + + def get_given_types(self): + raise NotImplementedError + + def define_generics(self, type_var_dict): + changed = False + new_generics = [] + for generic_set in self.get_given_types(): + contexts = NO_CONTEXTS + for generic in generic_set: + if isinstance(generic, AbstractAnnotatedClass): + new_generic = generic.define_generics(type_var_dict) + contexts |= ContextSet([new_generic]) + if new_generic != generic: + changed = True + else: + if isinstance(generic, TypeVar): + try: + contexts |= type_var_dict[generic.py__name__()] + changed = True + except KeyError: + contexts |= ContextSet([generic]) + else: + contexts |= ContextSet([generic]) + new_generics.append(contexts) + + if not changed: + # There might not be any type vars that change. In that case just + # return itself, because it does not make sense to potentially lose + # cached results. 
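+            # For example, remapping ``List[T]`` with an empty type var
+            # mapping changes nothing and returns the cached class.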
+ return self + + return AnnotatedSubClass( + self._wrapped_context, + given_types=tuple(new_generics) + ) + + def __repr__(self): + return '<%s: %s%s>' % ( + self.__class__.__name__, + self._wrapped_context, + list(self.get_given_types()), + ) + + @to_list + def py__bases__(self): + for base in self._wrapped_context.py__bases__(): + yield LazyAnnotatedBaseClass(self, base) + + +class AnnotatedClass(AbstractAnnotatedClass): + def __init__(self, class_context, index_context, context_of_index): + super(AnnotatedClass, self).__init__(class_context) + self._index_context = index_context + self._context_of_index = context_of_index + + @evaluator_method_cache() + def get_given_types(self): + return list(_iter_over_arguments(self._index_context, self._context_of_index)) + + def annotate_other_class(self, cls): + return AnnotatedClass(cls, self._index_context, self._context_of_index) + + +class AnnotatedSubClass(AbstractAnnotatedClass): + def __init__(self, class_context, given_types): + super(AnnotatedSubClass, self).__init__(class_context) + self._given_types = given_types + + def get_given_types(self): + return self._given_types + + +class LazyAnnotatedBaseClass(object): + def __init__(self, class_context, lazy_base_class): + self._class_context = class_context + self._lazy_base_class = lazy_base_class + + @iterator_to_context_set + def infer(self): + for base in self._lazy_base_class.infer(): + if isinstance(base, AbstractAnnotatedClass): + # Here we have to recalculate the given types. + yield AnnotatedSubClass.create_cached( + base.evaluator, + base._wrapped_context, + tuple(self._remap_type_vars(base)), + ) + else: + yield base + + def _remap_type_vars(self, base): + filter = self._class_context.get_type_var_filter() + for type_var_set in base.get_given_types(): + new = NO_CONTEXTS + for type_var in type_var_set: + if isinstance(type_var, TypeVar): + names = filter.get(type_var.py__name__()) + new |= ContextSet.from_sets( + name.infer() for name in names + ) + else: + # Mostly will be type vars, except if in some cases + # a concrete type will already be there. In that + # case just add it to the context set. + new |= ContextSet([type_var]) + yield new + + +class InstanceWrapper(ContextWrapper): + def py__stop_iteration_returns(self): + for cls in self._wrapped_context.class_context.py__mro__(): + if cls.py__name__() == 'Generator': + given_types = cls.get_given_types() + try: + return given_types[2].execute_annotation() + except IndexError: + pass + elif cls.py__name__() == 'Iterator': + return ContextSet([builtin_from_name(self.evaluator, u'None')]) + return self._wrapped_context.py__stop_iteration_returns() diff --git a/jedi/evaluate/gradual/utils.py b/jedi/evaluate/gradual/utils.py new file mode 100644 index 00000000..7de63252 --- /dev/null +++ b/jedi/evaluate/gradual/utils.py @@ -0,0 +1,31 @@ +import os + +from jedi.evaluate.gradual.typeshed import TYPESHED_PATH, create_stub_module + + +def load_proper_stub_module(evaluator, path, import_names, module_node): + """ + This function is given a random .pyi file and should return the proper + module. 
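+    For a file inside typeshed, the import names are derived from the path,
+    e.g. ``.../stdlib/3/os/__init__.pyi`` becomes ``('os',)``.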
+ """ + assert path.endswith('.pyi') + if path.startswith(TYPESHED_PATH): + # /foo/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__ + rest = path[len(TYPESHED_PATH) + 1: -4] + split_paths = tuple(rest.split(os.path.sep)) + # Remove the stdlib/3 or third_party/3.5 part + import_names = split_paths[2:] + if import_names[-1] == '__init__': + import_names = import_names[:-1] + + if import_names is not None: + actual_context_set = evaluator.import_module(import_names, prefer_stubs=False) + if not actual_context_set: + return None + + stub = create_stub_module( + evaluator, actual_context_set, module_node, path, import_names + ) + evaluator.stub_module_cache[import_names] = stub + return stub + return None diff --git a/jedi/evaluate/helpers.py b/jedi/evaluate/helpers.py index d1c8b64a..c7071501 100644 --- a/jedi/evaluate/helpers.py +++ b/jedi/evaluate/helpers.py @@ -19,7 +19,7 @@ def is_stdlib_path(path): return False base_path = os.path.join(sys.prefix, 'lib', 'python') - return bool(re.match(re.escape(base_path) + '\d.\d', path)) + return bool(re.match(re.escape(base_path) + r'\d.\d', path)) def deep_ast_copy(obj): @@ -64,6 +64,10 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): The option ``cut_own_trailer`` must be set to true for the second purpose. """ trailer = leaf.parent + if trailer.type == 'fstring': + from jedi.evaluate import compiled + return compiled.get_string_context_set(context.evaluator) + # The leaf may not be the last or first child, because there exist three # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples # we should not match anything more than x. @@ -162,13 +166,21 @@ def get_module_names(module, all_scopes): Returns a dictionary with name parts as keys and their call paths as values. """ - names = chain.from_iterable(module.get_used_names().values()) + names = list(chain.from_iterable(module.get_used_names().values())) if not all_scopes: # We have to filter all the names that don't have the module as a # parent_scope. There's None as a parent, because nodes in the module # node have the parent module and not suite as all the others. # Therefore it's important to catch that case. - names = [n for n in names if get_parent_scope(n).parent in (module, None)] + + def is_module_scope_name(name): + parent_scope = get_parent_scope(name) + # async functions have an extra wrapper. Strip it. 
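+            # (e.g. a name defined by a module-level ``async def`` statement)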
+ if parent_scope and parent_scope.type == 'async_stmt': + parent_scope = parent_scope.parent + return parent_scope in (module, None) + + names = [n for n in names if is_module_scope_name(n)] return names @@ -182,17 +194,12 @@ def predefine_names(context, flow_scope, dct): del predefined[flow_scope] -def is_compiled(context): - from jedi.evaluate.compiled import CompiledObject - return isinstance(context, CompiledObject) - - def is_string(context): if context.evaluator.environment.version_info.major == 2: str_classes = (unicode, bytes) else: str_classes = (unicode,) - return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes) + return context.is_compiled() and isinstance(context.get_safe_value(default=None), str_classes) def is_literal(context): @@ -200,7 +207,7 @@ def is_literal(context): def _get_safe_value_or_none(context, accept): - if is_compiled(context): + if context.is_compiled(): value = context.get_safe_value(default=None) if isinstance(value, accept): return value @@ -214,22 +221,57 @@ def is_number(context): return _get_safe_value_or_none(context, (int, float)) is not None -class EvaluatorTypeError(Exception): - pass - - -class EvaluatorIndexError(Exception): - pass - - -class EvaluatorKeyError(Exception): +class SimpleGetItemNotFound(Exception): pass @contextmanager -def reraise_as_evaluator(*exception_classes): +def reraise_getitem_errors(*exception_classes): try: yield except exception_classes as e: - new_exc_cls = globals()['Evaluator' + e.__class__.__name__] - raise new_exc_cls(e) + raise SimpleGetItemNotFound(e) + + +def execute_evaluated(context, *value_list): + """ + Execute a function with already executed arguments. + """ + # TODO move this out of here to the evaluator. + from jedi.evaluate.arguments import ValuesArguments + from jedi.evaluate.base_context import ContextSet + arguments = ValuesArguments([ContextSet([value]) for value in value_list]) + return context.evaluator.execute(context, arguments) + + +def parse_dotted_names(nodes, is_import_from, until_node=None): + level = 0 + names = [] + for node in nodes[1:]: + if node in ('.', '...'): + if not names: + level += len(node.value) + elif node.type == 'dotted_name': + for n in node.children[::2]: + names.append(n) + if n is until_node: + break + else: + continue + break + elif node.type == 'name': + names.append(node) + if node is until_node: + break + elif node == ',': + if not is_import_from: + names = [] + else: + # Here if the keyword `import` comes along it stops checking + # for names. 
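+            # For example, in ``from foo.bar import baz`` the dotted module
+            # path ends before the ``import`` keyword.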
+ break + return level, names + + +def contexts_from_qualified_names(evaluator, *names): + return evaluator.import_module(names[:-1]).py__getattribute__(names[-1]) diff --git a/jedi/evaluate/imports.py b/jedi/evaluate/imports.py index 872ad1e1..66a33a80 100644 --- a/jedi/evaluate/imports.py +++ b/jedi/evaluate/imports.py @@ -16,6 +16,7 @@ import os from parso.python import tree from parso.tree import search_ancestor from parso import python_bytes_to_unicode +from parso.file_io import KnownContentFileIO from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo, force_unicode, unicode) @@ -26,10 +27,12 @@ from jedi.evaluate import sys_path from jedi.evaluate import helpers from jedi.evaluate import compiled from jedi.evaluate import analysis -from jedi.evaluate.utils import unite, dotted_from_fs_path +from jedi.evaluate.utils import unite from jedi.evaluate.cache import evaluator_method_cache -from jedi.evaluate.filters import AbstractNameDefinition +from jedi.evaluate.names import ImportName, SubModuleName from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS +from jedi.evaluate.gradual.typeshed import import_module_decorator +from jedi.evaluate.context.module import iter_module_names class ModuleCache(object): @@ -37,16 +40,14 @@ class ModuleCache(object): self._path_cache = {} self._name_cache = {} - def add(self, module, name): - path = module.py__file__() - self._path_cache[path] = module - self._name_cache[name] = module + def add(self, string_names, context_set): + #path = module.py__file__() + #self._path_cache[path] = context_set + if string_names is not None: + self._name_cache[string_names] = context_set - def iterate_modules_with_names(self): - return self._name_cache.items() - - def get(self, name): - return self._name_cache[name] + def get(self, string_names): + return self._name_cache[string_names] def get_from_path(self, path): return self._path_cache[path] @@ -95,7 +96,7 @@ def infer_import(context, tree_name, is_goto=False): for t in types ) if not is_goto: - types = ContextSet.from_set(types) + types = ContextSet(types) if not types: path = import_path + [from_import_name] @@ -146,47 +147,38 @@ class NestedImportModule(tree.Module): self._nested_import) -def _add_error(context, name, message=None): - # Should be a name, not a string! - if message is None: - name_str = str(name.value) if isinstance(name, tree.Name) else name - message = 'No module named ' + name_str - if hasattr(name, 'parent'): +def _add_error(context, name, message): + if hasattr(name, 'parent') and context is not None: analysis.add(context, 'import-error', name, message) else: debug.warning('ImportError without origin: ' + message) -class ImportName(AbstractNameDefinition): - start_pos = (1, 0) - _level = 0 +def _level_to_base_import_path(project_path, directory, level): + """ + In case the level is outside of the currently known package (something like + import .....foo), we can still try our best to help the user for + completions. + """ + for i in range(level - 1): + old = directory + directory = os.path.dirname(directory) + if old == directory: + return None, None - def __init__(self, parent_context, string_name): - self.parent_context = parent_context - self.string_name = string_name - - def infer(self): - return Importer( - self.parent_context.evaluator, - [self.string_name], - self.parent_context, - level=self._level, - ).follow() - - def goto(self): - return [m.name for m in self.infer()] - - def get_root_context(self): - # Not sure if this is correct. 
-        return self.parent_context.get_root_context()
-
-    @property
-    def api_type(self):
-        return 'module'
-
-
-class SubModuleName(ImportName):
-    _level = 1
+    d = directory
+    level_import_paths = []
+    # Now that we are on the level that the user wants to be, calculate the
+    # import path for it.
+    while True:
+        if d == project_path:
+            return level_import_paths, d
+        dir_name = os.path.basename(d)
+        if dir_name:
+            level_import_paths.insert(0, dir_name)
+            d = os.path.dirname(d)
+        else:
+            return None, directory
 
 
 class Importer(object):
@@ -203,201 +195,136 @@ class Importer(object):
 
         :param import_path: List of namespaces (strings or Names).
         """
-        debug.speed('import %s' % (import_path,))
+        debug.speed('import %s %s' % (import_path, module_context))
         self._evaluator = evaluator
         self.level = level
         self.module_context = module_context
-        try:
-            self.file_path = module_context.py__file__()
-        except AttributeError:
-            # Can be None for certain compiled modules like 'builtins'.
-            self.file_path = None
 
+        self._fixed_sys_path = None
+        self._inference_possible = True
        if level:
-            base = module_context.py__package__().split('.')
+            base = module_context.py__package__()
             if base == [''] or base == ['__main__']:
+                raise NotImplementedError(module_context.py__package__())
                 base = []
-            if level > len(base):
-                path = module_context.py__file__()
-                if path is not None:
-                    import_path = list(import_path)
-                    p = path
-                    for i in range(level):
-                        p = os.path.dirname(p)
-                    dir_name = os.path.basename(p)
-                    # This is not the proper way to do relative imports. However, since
-                    # Jedi cannot be sure about the entry point, we just calculate an
-                    # absolute path here.
-                    if dir_name:
-                        # TODO those sys.modules modifications are getting
-                        # really stupid. this is the 3rd time that we're using
-                        # this. We should probably refactor.
-                        if path.endswith(os.path.sep + 'os.py'):
-                            import_path.insert(0, 'os')
-                        else:
-                            import_path.insert(0, dir_name)
-                    else:
-                        _add_error(
-                            module_context, import_path[-1],
-                            message='Attempted relative import beyond top-level package.'
-                        )
-                        import_path = []
-                # If no path is defined in the module we have no ideas where we
-                # are in the file system. Therefore we cannot know what to do.
-                # In this case we just let the path there and ignore that it's
-                # a relative path. Not sure if that's a good idea.
-            else:
+            # We need to care for two cases. The first one is a valid Python
+            # import: it has a properly defined module name chain like
+            # `foo.bar.baz`, and an import of `..lala` in baz can then
+            # resolve to `foo.bar.lala`.
+            # The else here is a heuristic for all other cases. If for example
+            # in `foo` you search for `...bar`, it's obviously out of scope.
+            # However, since Jedi just tries to do its best, we help the user
+            # here, because they might have specified something wrong in their
+            # project.
+            if level <= len(base):
                 # Here we basically rewrite the level to 0.
                 base = tuple(base)
                 if level > 1:
                     base = base[:-level + 1]
-            import_path = base + tuple(import_path)
+                import_path = base + tuple(import_path)
+            else:
+                path = module_context.py__file__()
+                import_path = list(import_path)
+                if path is None:
+                    # If no path is defined, our best case is that the current
+                    # file is edited by a user on the current working
+                    # directory. We need to add an initial path, because it
+                    # will get removed as the name of the current file.
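+                    # (e.g. when completing an unsaved editor buffer)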
+ directory = os.getcwd() + else: + directory = os.path.dirname(path) + + base_import_path, base_directory = _level_to_base_import_path( + self._evaluator.project._path, directory, level, + ) + if base_directory is None: + # Everything is lost, the relative import does point + # somewhere out of the filesystem. + self._inference_possible = False + else: + self._fixed_sys_path = [force_unicode(base_directory)] + + if base_import_path is None: + if import_path: + _add_error( + module_context, import_path[0], + message='Attempted relative import beyond top-level package.' + ) + else: + import_path = base_import_path + import_path self.import_path = import_path @property - def str_import_path(self): + def _str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" return tuple( name.value if isinstance(name, tree.Name) else name for name in self.import_path ) - def sys_path_with_modifications(self): - sys_path_mod = self._evaluator.get_sys_path() \ - + sys_path.check_sys_path_modifications(self.module_context) + def _sys_path_with_modifications(self): + if self._fixed_sys_path is not None: + return self._fixed_sys_path - if self.import_path and self.file_path is not None \ - and self._evaluator.environment.version_info.major == 2: - # Python2 uses an old strange way of importing relative imports. - sys_path_mod.append(force_unicode(os.path.dirname(self.file_path))) + sys_path_mod = ( + self._evaluator.get_sys_path() + + sys_path.check_sys_path_modifications(self.module_context) + ) + + if self._evaluator.environment.version_info.major == 2: + file_path = self.module_context.py__file__() + if file_path is not None: + # Python2 uses an old strange way of importing relative imports. + sys_path_mod.append(force_unicode(os.path.dirname(file_path))) return sys_path_mod def follow(self): - if not self.import_path: + if not self.import_path or not self._evaluator.infer_enabled \ + or not self._inference_possible: return NO_CONTEXTS - return self._do_import(self.import_path, self.sys_path_with_modifications()) - def _do_import(self, import_path, sys_path): - """ - This method is very similar to importlib's `_gcd_import`. - """ - import_parts = [ + import_names = tuple( force_unicode(i.value if isinstance(i, tree.Name) else i) - for i in import_path - ] - - # Handle "magic" Flask extension imports: - # ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. - if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']: - # New style. - ipath = ('flask_' + str(import_parts[2]),) + import_path[3:] - modules = self._do_import(ipath, sys_path) - if modules: - return modules - else: - # Old style - return self._do_import(('flaskext',) + import_path[2:], sys_path) - - module_name = '.'.join(import_parts) - try: - return ContextSet(self._evaluator.module_cache.get(module_name)) - except KeyError: - pass - - if len(import_path) > 1: - # This is a recursive way of importing that works great with - # the module cache. - bases = self._do_import(import_path[:-1], sys_path) - if not bases: - return NO_CONTEXTS - # We can take the first element, because only the os special - # case yields multiple modules, which is not important for - # further imports. - parent_module = list(bases)[0] - - # This is a huge exception, we follow a nested import - # ``os.path``, because it's a very important one in Python - # that is being achieved by messing with ``sys.modules`` in - # ``os``. 
- if import_parts == ['os', 'path']: - return parent_module.py__getattribute__('path') - - try: - method = parent_module.py__path__ - except AttributeError: - # The module is not a package. - _add_error(self.module_context, import_path[-1]) - return NO_CONTEXTS - else: - paths = method() - debug.dbg('search_module %s in paths %s', module_name, paths) - for path in paths: - # At the moment we are only using one path. So this is - # not important to be correct. - if not isinstance(path, list): - path = [path] - code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info( - string=import_parts[-1], - path=path, - full_name=module_name - ) - if module_path is not None: - break - else: - _add_error(self.module_context, import_path[-1]) - return NO_CONTEXTS - else: - debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) - # Override the sys.path. It works only good that way. - # Injecting the path directly into `find_module` did not work. - code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info( - string=import_parts[-1], - full_name=module_name, - sys_path=sys_path, - ) - if module_path is None: - # The module is not a package. - _add_error(self.module_context, import_path[-1]) - return NO_CONTEXTS - - module = _load_module( - self._evaluator, module_path, code, sys_path, - module_name=module_name, - safe_module_name=True, + for i in self.import_path ) + sys_path = self._sys_path_with_modifications() - if module is None: - # The file might raise an ImportError e.g. and therefore not be - # importable. - return NO_CONTEXTS - - return ContextSet(module) - - def _generate_name(self, name, in_module=None): - # Create a pseudo import to be able to follow them. - if in_module is None: - return ImportName(self.module_context, name) - return SubModuleName(in_module, name) + context_set = [None] + for i, name in enumerate(self.import_path): + context_set = ContextSet.from_sets([ + self._evaluator.import_module( + import_names[:i+1], + parent_module_context, + sys_path + ) for parent_module_context in context_set + ]) + if not context_set: + message = 'No module named ' + '.'.join(import_names) + _add_error(self.module_context, name, message) + return NO_CONTEXTS + return context_set def _get_module_names(self, search_path=None, in_module=None): """ Get the names of all modules in the search_path. This means file names and not names defined in the files. """ - sub = self._evaluator.compiled_subprocess - names = [] # add builtin module names if search_path is None and in_module is None: - names += [self._generate_name(name) for name in sub.get_builtin_module_names()] + names += [ImportName(self.module_context, name) + for name in self._evaluator.compiled_subprocess.get_builtin_module_names()] if search_path is None: - search_path = self.sys_path_with_modifications() + search_path = self._sys_path_with_modifications() - for name in sub.list_module_names(search_path): - names.append(self._generate_name(name, in_module=in_module)) + for name in iter_module_names(self._evaluator, search_path): + if in_module is None: + n = ImportName(self.module_context, name) + else: + n = SubModuleName(in_module, name) + names.append(n) return names def completion_names(self, evaluator, only_modules=False): @@ -405,135 +332,174 @@ class Importer(object): :param only_modules: Indicates wheter it's possible to import a definition that is not defined in a module. 
""" - from jedi.evaluate.context import ModuleContext - from jedi.evaluate.context.namespace import ImplicitNamespaceContext + if not self._inference_possible: + return [] + names = [] if self.import_path: # flask - if self.str_import_path == ('flask', 'ext'): + if self._str_import_path == ('flask', 'ext'): # List Flask extensions like ``flask_foo`` for mod in self._get_module_names(): modname = mod.string_name if modname.startswith('flask_'): extname = modname[len('flask_'):] - names.append(self._generate_name(extname)) + names.append(ImportName(self.module_context, extname)) # Now the old style: ``flaskext.foo`` - for dir in self.sys_path_with_modifications(): + for dir in self._sys_path_with_modifications(): flaskext = os.path.join(dir, 'flaskext') if os.path.isdir(flaskext): names += self._get_module_names([flaskext]) - for context in self.follow(): + contexts = self.follow() + for context in contexts: # Non-modules are not completable. if context.api_type != 'module': # not a module continue - # namespace packages - if isinstance(context, ModuleContext) and context.py__file__().endswith('__init__.py'): - paths = context.py__path__() - names += self._get_module_names(paths, in_module=context) + names += context.sub_modules_dict().values() - # implicit namespace packages - elif isinstance(context, ImplicitNamespaceContext): - paths = context.paths - names += self._get_module_names(paths, in_module=context) - - if only_modules: - # In the case of an import like `from x.` we don't need to - # add all the variables. - if ('os',) == self.str_import_path and not self.level: - # os.path is a hardcoded exception, because it's a - # ``sys.modules`` modification. - names.append(self._generate_name('path', context)) - - continue - - for filter in context.get_filters(search_global=False): - names += filter.values() + if not only_modules: + from jedi.evaluate.gradual.conversion import stub_to_actual_context_set + both_contexts = ContextSet.from_sets( + stub_to_actual_context_set(context, ignore_compiled=True) + for context in contexts + if context.is_stub() + ) | contexts + for c in both_contexts: + for filter in c.get_filters(search_global=False): + names += filter.values() else: - # Empty import path=completion after import - if not self.level: + if self.level: + # We only get here if the level cannot be properly calculated. + names += self._get_module_names(self._fixed_sys_path) + else: + # This is just the list of global imports. names += self._get_module_names() - - if self.file_path is not None: - path = os.path.abspath(self.file_path) - for i in range(self.level - 1): - path = os.path.dirname(path) - names += self._get_module_names([path]) - return names -def _load_module(evaluator, path=None, code=None, sys_path=None, - module_name=None, safe_module_name=False): - try: - return evaluator.module_cache.get(module_name) - except KeyError: - pass - try: - return evaluator.module_cache.get_from_path(path) - except KeyError: - pass +@import_module_decorator +def import_module(evaluator, import_names, parent_module_context, sys_path): + """ + This method is very similar to importlib's `_gcd_import`. + """ + if import_names[0] in settings.auto_import_modules: + module = _load_builtin_module(evaluator, import_names, sys_path) + if module is None: + return NO_CONTEXTS + return ContextSet([module]) - if isinstance(path, ImplicitNSInfo): + module_name = '.'.join(import_names) + if parent_module_context is None: + # Override the sys.path. It works only good that way. 
+ # Injecting the path directly into `find_module` did not work. + file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info( + string=import_names[-1], + full_name=module_name, + sys_path=sys_path, + is_global_search=True, + ) + if is_pkg is None: + return NO_CONTEXTS + else: + try: + method = parent_module_context.py__path__ + except AttributeError: + # The module is not a package. + return NO_CONTEXTS + else: + paths = method() + for path in paths: + # At the moment we are only using one path. So this is + # not important to be correct. + if not isinstance(path, list): + path = [path] + file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info( + string=import_names[-1], + path=path, + full_name=module_name, + is_global_search=False, + ) + if is_pkg is not None: + break + else: + return NO_CONTEXTS + + if isinstance(file_io_or_ns, ImplicitNSInfo): from jedi.evaluate.context.namespace import ImplicitNamespaceContext module = ImplicitNamespaceContext( evaluator, - fullname=path.name, - paths=path.paths, + fullname=file_io_or_ns.name, + paths=file_io_or_ns.paths, ) + elif file_io_or_ns is None: + module = _load_builtin_module(evaluator, import_names, sys_path) + if module is None: + return NO_CONTEXTS else: - if sys_path is None: - sys_path = evaluator.get_sys_path() + module = _load_python_module( + evaluator, file_io_or_ns, sys_path, + import_names=import_names, + is_package=is_pkg, + ) - dotted_path = path and dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith(('.py', '.zip', '.egg')) \ - and dotted_path not in settings.auto_import_modules: + if parent_module_context is None: + debug.dbg('global search_module %s: %s', import_names[-1], module) + else: + debug.dbg('search_module %s in paths %s: %s', module_name, paths, module) + return ContextSet([module]) - module_node = evaluator.parse( - code=code, path=path, cache=True, diff_cache=True, - cache_path=settings.cache_directory) - from jedi.evaluate.context import ModuleContext - module = ModuleContext( - evaluator, module_node, - path=path, - code_lines=get_cached_code_lines(evaluator.grammar, path), - ) - else: - module = compiled.load_module(evaluator, path=path, sys_path=sys_path) +def _load_python_module(evaluator, file_io, sys_path=None, + import_names=None, is_package=False): + try: + return evaluator.module_cache.get_from_path(file_io.path) + except KeyError: + pass - if module is not None and module_name is not None: - add_module_to_cache(evaluator, module_name, module, safe=safe_module_name) + module_node = evaluator.parse( + file_io=file_io, + cache=True, + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory + ) + from jedi.evaluate.context import ModuleContext + return ModuleContext( + evaluator, module_node, + path=file_io.path, + string_names=import_names, + code_lines=get_cached_code_lines(evaluator.grammar, file_io.path), + is_package=is_package, + ) + + +def _load_builtin_module(evaluator, import_names=None, sys_path=None): + if sys_path is None: + sys_path = evaluator.get_sys_path() + + dotted_name = '.'.join(import_names) + assert dotted_name is not None + module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path) + if module is None: + # The file might raise an ImportError e.g. and therefore not be + # importable. + return None return module -def add_module_to_cache(evaluator, module_name, module, safe=False): - if not safe and '.' 
not in module_name: - # We cannot add paths with dots, because that would collide with - # the sepatator dots for nested packages. Therefore we return - # `__main__` in ModuleWrapper.py__name__(), which is similar to - # Python behavior. - return - evaluator.module_cache.add(module, module_name) - - def get_modules_containing_name(evaluator, modules, name): """ Search a name in the directories of modules. """ - def check_directories(paths): - for p in paths: - if p is not None: - # We need abspath, because the seetings paths might not already - # have been converted to absolute paths. - d = os.path.dirname(os.path.abspath(p)) - for file_name in os.listdir(d): - path = os.path.join(d, file_name) - if file_name.endswith('.py'): - yield path + def check_directory(path): + d = os.path.dirname(os.path.abspath(path)) + for file_name in os.listdir(d): + path = os.path.join(d, file_name) + if file_name.endswith('.py'): + yield path - def check_fs(path): + def check_fs(path, base_names): try: f = open(path, 'rb') except FileNotFoundError: @@ -542,35 +508,52 @@ def get_modules_containing_name(evaluator, modules, name): code = python_bytes_to_unicode(f.read(), errors='replace') if name in code: e_sys_path = evaluator.get_sys_path() - module_name = sys_path.dotted_path_in_sys_path(e_sys_path, path) - module = _load_module( - evaluator, path, code, - sys_path=e_sys_path, module_name=module_name + if base_names: + module_name = os.path.basename(path) + module_name = sys_path.remove_python_path_suffix(module_name) + is_package = module_name == '__init__' + if is_package: + raise NotImplementedError( + "This is probably not possible yet, please add a failing test first") + module_name = os.path.basename(os.path.dirname(path)) + import_names = base_names + (module_name,) + else: + import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path) + + module = _load_python_module( + evaluator, KnownContentFileIO(path, code), + sys_path=e_sys_path, + import_names=import_names, + is_package=is_package, ) + evaluator.module_cache.add(import_names, ContextSet([module])) return module # skip non python modules used_mod_paths = set() + path_with_names_to_be_checked = [] for m in modules: try: path = m.py__file__() except AttributeError: pass else: - used_mod_paths.add(path) + if path is not None: + if path not in used_mod_paths: + used_mod_paths.add(path) + path_with_names_to_be_checked.append((path, m.py__package__())) yield m if not settings.dynamic_params_for_other_modules: return - additional = set(os.path.abspath(p) for p in settings.additional_dynamic_modules) - # Check the directories of used modules. - paths = (additional | set(check_directories(used_mod_paths))) \ - - used_mod_paths + for p in settings.additional_dynamic_modules: + p = os.path.abspath(p) + if p not in used_mod_paths: + path_with_names_to_be_checked.append((p, None)) - # Sort here to make issues less random. 
- for p in sorted(paths): - # make testing easier, sort it - same results on every interpreter - m = check_fs(p) - if m is not None and not isinstance(m, compiled.CompiledObject): - yield m + for p, base_names in path_with_names_to_be_checked: + for file_path in check_directory(p): + m = check_fs(file_path, base_names) + if m is not None and not isinstance(m, compiled.CompiledObject): + yield m diff --git a/jedi/evaluate/jedi_typing.py b/jedi/evaluate/jedi_typing.py deleted file mode 100644 index aeb63a87..00000000 --- a/jedi/evaluate/jedi_typing.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -This module is not intended to be used in jedi, rather it will be fed to the -jedi-parser to replace classes in the typing module -""" - -try: - from collections import abc -except ImportError: - # python 2 - import collections as abc - - -def factory(typing_name, indextypes): - class Iterable(abc.Iterable): - def __iter__(self): - while True: - yield indextypes[0]() - - class Iterator(Iterable, abc.Iterator): - def next(self): - """ needed for python 2 """ - return self.__next__() - - def __next__(self): - return indextypes[0]() - - class Sequence(abc.Sequence): - def __getitem__(self, index): - return indextypes[0]() - - class MutableSequence(Sequence, abc.MutableSequence): - pass - - class List(MutableSequence, list): - pass - - class Tuple(Sequence, tuple): - def __getitem__(self, index): - if indextypes[1] == Ellipsis: - # https://www.python.org/dev/peps/pep-0484/#the-typing-module - # Tuple[int, ...] means a tuple of ints of indetermined length - return indextypes[0]() - else: - return indextypes[index]() - - class AbstractSet(Iterable, abc.Set): - pass - - class MutableSet(AbstractSet, abc.MutableSet): - pass - - class KeysView(Iterable, abc.KeysView): - pass - - class ValuesView(abc.ValuesView): - def __iter__(self): - while True: - yield indextypes[1]() - - class ItemsView(abc.ItemsView): - def __iter__(self): - while True: - yield (indextypes[0](), indextypes[1]()) - - class Mapping(Iterable, abc.Mapping): - def __getitem__(self, item): - return indextypes[1]() - - def keys(self): - return KeysView() - - def values(self): - return ValuesView() - - def items(self): - return ItemsView() - - class MutableMapping(Mapping, abc.MutableMapping): - pass - - class Dict(MutableMapping, dict): - pass - - class DefaultDict(MutableMapping, dict): - pass - - dct = { - "Sequence": Sequence, - "MutableSequence": MutableSequence, - "List": List, - "Iterable": Iterable, - "Iterator": Iterator, - "AbstractSet": AbstractSet, - "MutableSet": MutableSet, - "Mapping": Mapping, - "MutableMapping": MutableMapping, - "Tuple": Tuple, - "KeysView": KeysView, - "ItemsView": ItemsView, - "ValuesView": ValuesView, - "Dict": Dict, - "DefaultDict": DefaultDict, - } - return dct[typing_name] diff --git a/jedi/evaluate/lazy_context.py b/jedi/evaluate/lazy_context.py index 97e4b068..0501d3b8 100644 --- a/jedi/evaluate/lazy_context.py +++ b/jedi/evaluate/lazy_context.py @@ -1,6 +1,7 @@ from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS from jedi.common.utils import monkeypatch + class AbstractLazyContext(object): def __init__(self, data): self.data = data @@ -15,7 +16,7 @@ class AbstractLazyContext(object): class LazyKnownContext(AbstractLazyContext): """data is a context.""" def infer(self): - return ContextSet(self.data) + return ContextSet([self.data]) class LazyKnownContexts(AbstractLazyContext): @@ -35,14 +36,14 @@ class LazyUnknownContext(AbstractLazyContext): class LazyTreeContext(AbstractLazyContext): def 
__init__(self, context, node): super(LazyTreeContext, self).__init__(node) - self._context = context + self.context = context # We need to save the predefined names. It's an unfortunate side effect # that needs to be tracked otherwise results will be wrong. self._predefined_names = dict(context.predefined_names) def infer(self): - with monkeypatch(self._context, 'predefined_names', self._predefined_names): - return self._context.eval_node(self.data) + with monkeypatch(self.context, 'predefined_names', self._predefined_names): + return self.context.eval_node(self.data) def get_merged_lazy_context(lazy_contexts): diff --git a/jedi/evaluate/names.py b/jedi/evaluate/names.py new file mode 100644 index 00000000..7fe6c165 --- /dev/null +++ b/jedi/evaluate/names.py @@ -0,0 +1,206 @@ +from abc import abstractmethod + +from parso.tree import search_ancestor + +from jedi._compatibility import Parameter +from jedi.evaluate.base_context import ContextSet +from jedi.cache import memoize_method + + +class AbstractNameDefinition(object): + start_pos = None + string_name = None + parent_context = None + tree_name = None + + @abstractmethod + def infer(self): + raise NotImplementedError + + @abstractmethod + def goto(self): + # Typically names are already definitions and therefore a goto on that + # name will always result on itself. + return {self} + + @abstractmethod + def get_qualified_names(self): + raise NotImplementedError + + def get_root_context(self): + return self.parent_context.get_root_context() + + def __repr__(self): + if self.start_pos is None: + return '<%s: %s>' % (self.__class__.__name__, self.string_name) + return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos) + + def is_import(self): + return False + + @property + def api_type(self): + return self.parent_context.api_type + + +class AbstractTreeName(AbstractNameDefinition): + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def get_qualified_names(self): + parent_names = self.parent_context.get_qualified_names() + if parent_names is None: + return None + return parent_names + [self.tree_name.value] + + def goto(self): + return self.parent_context.evaluator.goto(self.parent_context, self.tree_name) + + def is_import(self): + imp = search_ancestor(self.tree_name, 'import_from', 'import_name') + return imp is not None + + @property + def string_name(self): + return self.tree_name.value + + @property + def start_pos(self): + return self.tree_name.start_pos + + +class ContextNameMixin(object): + def infer(self): + return ContextSet([self._context]) + + def get_qualified_names(self): + return self._context.get_qualified_names() + + def get_root_context(self): + if self.parent_context is None: # A module + return self._context + return super(ContextNameMixin, self).get_root_context() + + @property + def api_type(self): + return self._context.api_type + + +class ContextName(ContextNameMixin, AbstractTreeName): + def __init__(self, context, tree_name): + super(ContextName, self).__init__(context.parent_context, tree_name) + self._context = context + + def goto(self): + from jedi.evaluate.gradual.conversion import try_stub_to_actual_names + return try_stub_to_actual_names([self._context.name]) + + +class TreeNameDefinition(AbstractTreeName): + _API_TYPES = dict( + import_name='module', + import_from='module', + funcdef='function', + param='param', + classdef='class', + ) + + def infer(self): + # Refactor this, should probably be here. 
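+        # The import is done lazily, presumably to avoid a circular import.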
+ from jedi.evaluate.syntax_tree import tree_name_to_contexts + parent = self.parent_context + return tree_name_to_contexts(parent.evaluator, parent, self.tree_name) + + @property + def api_type(self): + definition = self.tree_name.get_definition(import_name_always=True) + if definition is None: + return 'statement' + return self._API_TYPES.get(definition.type, 'statement') + + +class ParamName(AbstractTreeName): + api_type = u'param' + + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def get_kind(self): + tree_param = search_ancestor(self.tree_name, 'param') + if tree_param.star_count == 1: # *args + return Parameter.VAR_POSITIONAL + if tree_param.star_count == 2: # **kwargs + return Parameter.VAR_KEYWORD + + parent = tree_param.parent + for p in parent.children: + if p.type == 'param': + if p.star_count: + return Parameter.KEYWORD_ONLY + if p == tree_param: + break + return Parameter.POSITIONAL_OR_KEYWORD + + def infer(self): + return self.get_param().infer() + + def get_param(self): + params, _ = self.parent_context.get_executed_params_and_issues() + param_node = search_ancestor(self.tree_name, 'param') + return params[param_node.position_index] + + +class ImportName(AbstractNameDefinition): + start_pos = (1, 0) + _level = 0 + + def __init__(self, parent_context, string_name): + self._from_module_context = parent_context + self.string_name = string_name + + def get_qualified_names(self): + return [] + + @property + def parent_context(self): + m = self._from_module_context + import_contexts = self.infer() + if not import_contexts: + return m + # It's almost always possible to find the import or to not find it. The + # importing returns only one context, pretty much always. + return next(iter(import_contexts)) + + @memoize_method + def infer(self): + from jedi.evaluate.imports import Importer + m = self._from_module_context + return Importer(m.evaluator, [self.string_name], m, level=self._level).follow() + + def goto(self): + return [m.name for m in self.infer()] + + @property + def api_type(self): + return 'module' + + +class SubModuleName(ImportName): + _level = 1 + + +class NameWrapper(object): + def __init__(self, wrapped_name): + self._wrapped_name = wrapped_name + + @abstractmethod + def infer(self): + raise NotImplementedError + + def __getattr__(self, name): + return getattr(self._wrapped_name, name) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_name) diff --git a/jedi/evaluate/param.py b/jedi/evaluate/param.py index 1445ef0c..ffec77e3 100644 --- a/jedi/evaluate/param.py +++ b/jedi/evaluate/param.py @@ -1,38 +1,62 @@ from collections import defaultdict +from jedi import debug from jedi.evaluate.utils import PushBackIterator from jedi.evaluate import analysis from jedi.evaluate.lazy_context import LazyKnownContext, \ LazyTreeContext, LazyUnknownContext from jedi.evaluate import docstrings -from jedi.evaluate import pep0484 from jedi.evaluate.context import iterable -def _add_argument_issue(parent_context, error_name, lazy_context, message): +def _add_argument_issue(error_name, lazy_context, message): if isinstance(lazy_context, LazyTreeContext): node = lazy_context.data if node.parent.type == 'argument': node = node.parent - analysis.add(parent_context, error_name, node, message) + return analysis.add(lazy_context.context, error_name, node, message) class ExecutedParam(object): """Fake a param and give it values.""" - def __init__(self, execution_context, param_node, 
lazy_context): + def __init__(self, execution_context, param_node, lazy_context, is_default=False): self._execution_context = execution_context self._param_node = param_node self._lazy_context = lazy_context self.string_name = param_node.name.value + self._is_default = is_default - def infer(self): - pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node) - doc_params = docstrings.infer_param(self._execution_context, self._param_node) - if pep0484_hints or doc_params: - return pep0484_hints | doc_params + def infer_annotations(self): + from jedi.evaluate.gradual.annotation import infer_param + return infer_param(self._execution_context, self._param_node) + + def infer(self, use_hints=True): + if use_hints: + doc_params = docstrings.infer_param(self._execution_context, self._param_node) + ann = self.infer_annotations().execute_annotation() + if ann or doc_params: + return ann | doc_params return self._lazy_context.infer() + def matches_signature(self): + if self._is_default: + return True + argument_contexts = self.infer(use_hints=False).py__class__() + if self._param_node.star_count: + return True + annotations = self.infer_annotations() + if not annotations: + # If we cannot infer annotations - or there aren't any - pretend + # that the signature matches. + return True + matches = any(c1.is_sub_class_of(c2) + for c1 in argument_contexts + for c2 in annotations.gather_annotation_classes()) + debug.dbg("signature compare %s: %s <=> %s", + matches, argument_contexts, annotations, color='BLUE') + return matches + @property def var_args(self): return self._execution_context.var_args @@ -41,15 +65,35 @@ class ExecutedParam(object): return '<%s: %s>' % (self.__class__.__name__, self.string_name) -def get_params(execution_context, var_args): +def get_executed_params_and_issues(execution_context, arguments): + def too_many_args(argument): + m = _error_argument_count(funcdef, len(unpacked_va)) + # Just report an error for the first param that is not needed (like + # cPython). + if arguments.get_calling_nodes(): + # There might not be a valid calling node so check for that first. + issues.append( + _add_argument_issue( + 'type-error-too-many-arguments', + argument, + message=m + ) + ) + else: + issues.append(None) + + issues = [] # List[Optional[analysis issue]] result_params = [] param_dict = {} funcdef = execution_context.tree_node - parent_context = execution_context.parent_context + # Default params are part of the context where the function was defined. + # This means that they might have access on class variables that the + # function itself doesn't have. + default_param_context = execution_context.function_context.get_default_param_context() for param in funcdef.get_params(): param_dict[param.name.value] = param - unpacked_va = list(var_args.unpack(funcdef)) + unpacked_va = list(arguments.unpack(funcdef)) var_arg_iterator = PushBackIterator(iter(unpacked_va)) non_matching_keys = defaultdict(lambda: []) @@ -61,6 +105,7 @@ def get_params(execution_context, var_args): # args / kwargs will just be empty arrays / dicts, respectively. # Wrong value count is just ignored. If you try to test cases that are # not allowed in Python, Jedi will maybe not show any completions. + is_default = False key, argument = next(var_arg_iterator, (None, None)) while key is not None: keys_only = True @@ -73,9 +118,12 @@ def get_params(execution_context, var_args): had_multiple_value_error = True m = ("TypeError: %s() got multiple values for keyword argument '%s'." 
% (funcdef.name, key)) - for node in var_args.get_calling_nodes(): - analysis.add(parent_context, 'type-error-multiple-values', - node, message=m) + for contextualized_node in arguments.get_calling_nodes(): + issues.append( + analysis.add(contextualized_node.context, + 'type-error-multiple-values', + contextualized_node.node, message=m) + ) else: keys_used[key] = ExecutedParam(execution_context, key_param, argument) key, argument = next(var_arg_iterator, (None, None)) @@ -100,6 +148,8 @@ def get_params(execution_context, var_args): seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list) result_arg = LazyKnownContext(seq) elif param.star_count == 2: + if argument is not None: + too_many_args(argument) # **kwargs param dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys)) result_arg = LazyKnownContext(dct) @@ -111,16 +161,26 @@ def get_params(execution_context, var_args): if param.default is None: result_arg = LazyUnknownContext() if not keys_only: - for node in var_args.get_calling_nodes(): + for contextualized_node in arguments.get_calling_nodes(): m = _error_argument_count(funcdef, len(unpacked_va)) - analysis.add(parent_context, 'type-error-too-few-arguments', - node, message=m) + issues.append( + analysis.add( + contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, + message=m, + ) + ) else: - result_arg = LazyTreeContext(parent_context, param.default) + result_arg = LazyTreeContext(default_param_context, param.default) + is_default = True else: result_arg = argument - result_params.append(ExecutedParam(execution_context, param, result_arg)) + result_params.append(ExecutedParam( + execution_context, param, result_arg, + is_default=is_default + )) if not isinstance(result_arg, LazyUnknownContext): keys_used[param.name.value] = result_params[-1] @@ -134,31 +194,30 @@ def get_params(execution_context, var_args): if not (non_matching_keys or had_multiple_value_error or param.star_count or param.default): # add a warning only if there's not another one. - for node in var_args.get_calling_nodes(): + for contextualized_node in arguments.get_calling_nodes(): m = _error_argument_count(funcdef, len(unpacked_va)) - analysis.add(parent_context, 'type-error-too-few-arguments', - node, message=m) + issues.append( + analysis.add(contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, message=m) + ) for key, lazy_context in non_matching_keys.items(): m = "TypeError: %s() got an unexpected keyword argument '%s'." \ % (funcdef.name, key) - _add_argument_issue( - parent_context, - 'type-error-keyword-argument', - lazy_context, - message=m + issues.append( + _add_argument_issue( + 'type-error-keyword-argument', + lazy_context, + message=m + ) ) remaining_arguments = list(var_arg_iterator) if remaining_arguments: - m = _error_argument_count(funcdef, len(unpacked_va)) - # Just report an error for the first param that is not needed (like - # cPython). first_key, lazy_context = remaining_arguments[0] - if var_args.get_calling_nodes(): - # There might not be a valid calling node so check for that first. 
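The matching loop in the rewritten `get_executed_params_and_issues()` statically emulates the parameter binding that CPython performs at call time. As a rough standalone illustration (plain stdlib Python, not Jedi's API; `check_call` and `f` are made up for the demo), `inspect.Signature.bind()` raises the same family of errors that the analysis above reports as `type-error-too-few-arguments`, `type-error-too-many-arguments`, and `type-error-multiple-values`:

    import inspect

    def check_call(func, *args, **kwargs):
        # CPython-style parameter matching; Jedi's loop emulates this
        # statically instead of raising at runtime.
        try:
            bound = inspect.signature(func).bind(*args, **kwargs)
        except TypeError as e:
            return 'error: %s' % e
        return dict(bound.arguments)

    def f(a, b=1, *args, key=None):
        pass

    print(check_call(f, 1, 2, 3, key=4))  # {'a': 1, 'b': 2, 'args': (3,), 'key': 4}
    print(check_call(f))                  # error: missing a required argument: 'a'
    print(check_call(f, 1, b=2, a=3))     # error: multiple values for argument 'a'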
- _add_argument_issue(parent_context, 'type-error-too-many-arguments', lazy_context, message=m) - return result_params + too_many_args(lazy_context) + return result_params, issues def _error_argument_count(funcdef, actual_count): diff --git a/jedi/evaluate/pep0484.py b/jedi/evaluate/pep0484.py deleted file mode 100644 index f23943e1..00000000 --- a/jedi/evaluate/pep0484.py +++ /dev/null @@ -1,330 +0,0 @@ -""" -PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints -through function annotations. There is a strong suggestion in this document -that only the type of type hinting defined in PEP0484 should be allowed -as annotations in future python versions. - -The (initial / probably incomplete) implementation todo list for pep-0484: -v Function parameter annotations with builtin/custom type classes -v Function returntype annotations with builtin/custom type classes -v Function parameter annotations with strings (forward reference) -v Function return type annotations with strings (forward reference) -v Local variable type hints -v Assigned types: `Url = str\ndef get(url:Url) -> str:` -v Type hints in `with` statements -x Stub files support -x support `@no_type_check` and `@no_type_check_decorator` -x support for typing.cast() operator -x support for type hint comments for functions, `# type: (int, str) -> int`. - See comment from Guido https://github.com/davidhalter/jedi/issues/662 -""" - -import os -import re - -from parso import ParserSyntaxError, parse, split_lines -from parso.python import tree - -from jedi._compatibility import unicode, force_unicode -from jedi.evaluate.cache import evaluator_method_cache -from jedi.evaluate import compiled -from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet -from jedi.evaluate.lazy_context import LazyTreeContext -from jedi.evaluate.context import ModuleContext -from jedi.evaluate.helpers import is_string -from jedi import debug -from jedi import parser_utils - - -def _evaluate_for_annotation(context, annotation, index=None): - """ - Evaluates a string-node, looking for an annotation - If index is not None, the annotation is expected to be a tuple - and we're interested in that index - """ - context_set = context.eval_node(_fix_forward_reference(context, annotation)) - return context_set.execute_evaluated() - - -def _evaluate_annotation_string(context, string, index=None): - node = _get_forward_reference_node(context, string) - if node is None: - return NO_CONTEXTS - - context_set = context.eval_node(node) - if index is not None: - context_set = context_set.filter( - lambda context: context.array_type == u'tuple' - and len(list(context.py__iter__())) >= index - ).py__getitem__(index) - return context_set.execute_evaluated() - - -def _fix_forward_reference(context, node): - evaled_nodes = context.eval_node(node) - if len(evaled_nodes) != 1: - debug.warning("Eval'ed typing index %s should lead to 1 object, " - " not %s" % (node, evaled_nodes)) - return node - - evaled_context = list(evaled_nodes)[0] - if is_string(evaled_context): - result = _get_forward_reference_node(context, evaled_context.get_safe_value()) - if result is not None: - return result - - return node - - -def _get_forward_reference_node(context, string): - try: - new_node = context.evaluator.grammar.parse( - force_unicode(string), - start_symbol='eval_input', - error_recovery=False - ) - except ParserSyntaxError: - debug.warning('Annotation not parsed: %s' % string) - return None - else: - module = context.tree_node.get_root_node() - 
parser_utils.move(new_node, module.end_pos[0]) - new_node.parent = context.tree_node - return new_node - - -def _split_comment_param_declaration(decl_text): - """ - Split decl_text on commas, but group generic expressions - together. - - For example, given "foo, Bar[baz, biz]" we return - ['foo', 'Bar[baz, biz]']. - - """ - try: - node = parse(decl_text, error_recovery=False).children[0] - except ParserSyntaxError: - debug.warning('Comment annotation is not valid Python: %s' % decl_text) - return [] - - if node.type == 'name': - return [node.get_code().strip()] - - params = [] - try: - children = node.children - except AttributeError: - return [] - else: - for child in children: - if child.type in ['name', 'atom_expr', 'power']: - params.append(child.get_code().strip()) - - return params - - -@evaluator_method_cache() -def infer_param(execution_context, param): - """ - Infers the type of a function parameter, using type annotations. - """ - annotation = param.annotation - if annotation is None: - # If no Python 3-style annotation, look for a Python 2-style comment - # annotation. - # Identify parameters to function in the same sequence as they would - # appear in a type comment. - all_params = [child for child in param.parent.children - if child.type == 'param'] - - node = param.parent.parent - comment = parser_utils.get_following_comment_same_line(node) - if comment is None: - return NO_CONTEXTS - - match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment) - if not match: - return NO_CONTEXTS - params_comments = _split_comment_param_declaration(match.group(1)) - - # Find the specific param being investigated - index = all_params.index(param) - # If the number of parameters doesn't match length of type comment, - # ignore first parameter (assume it's self). - if len(params_comments) != len(all_params): - debug.warning( - "Comments length != Params length %s %s", - params_comments, all_params - ) - from jedi.evaluate.context.instance import BaseInstanceFunctionExecution - if isinstance(execution_context, BaseInstanceFunctionExecution): - if index == 0: - # Assume it's self, which is already handled - return NO_CONTEXTS - index -= 1 - if index >= len(params_comments): - return NO_CONTEXTS - - param_comment = params_comments[index] - return _evaluate_annotation_string( - execution_context.get_root_context(), - param_comment - ) - module_context = execution_context.get_root_context() - return _evaluate_for_annotation(module_context, annotation) - - -def py__annotations__(funcdef): - return_annotation = funcdef.annotation - if return_annotation: - dct = {'return': return_annotation} - else: - dct = {} - for function_param in funcdef.get_params(): - param_annotation = function_param.annotation - if param_annotation is not None: - dct[function_param.name.value] = param_annotation - return dct - - -@evaluator_method_cache() -def infer_return_types(function_context): - """ - Infers the type of a function's return value, - according to type annotations. 
- """ - annotation = py__annotations__(function_context.tree_node).get("return", None) - if annotation is None: - # If there is no Python 3-type annotation, look for a Python 2-type annotation - node = function_context.tree_node - comment = parser_utils.get_following_comment_same_line(node) - if comment is None: - return NO_CONTEXTS - - match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment) - if not match: - return NO_CONTEXTS - - return _evaluate_annotation_string( - function_context.get_root_context(), - match.group(1).strip() - ) - - module_context = function_context.get_root_context() - return _evaluate_for_annotation(module_context, annotation) - - -_typing_module = None -_typing_module_code_lines = None - - -def _get_typing_replacement_module(grammar): - """ - The idea is to return our jedi replacement for the PEP-0484 typing module - as discussed at https://github.com/davidhalter/jedi/issues/663 - """ - global _typing_module, _typing_module_code_lines - if _typing_module is None: - typing_path = \ - os.path.abspath(os.path.join(__file__, "../jedi_typing.py")) - with open(typing_path) as f: - code = unicode(f.read()) - _typing_module = grammar.parse(code) - _typing_module_code_lines = split_lines(code, keepends=True) - return _typing_module, _typing_module_code_lines - - -def py__getitem__(context, typ, node): - if not typ.get_root_context().name.string_name == "typing": - return None - # we assume that any class using [] in a module called - # "typing" with a name for which we have a replacement - # should be replaced by that class. This is not 100% - # airtight but I don't have a better idea to check that it's - # actually the PEP-0484 typing module and not some other - if node.type == "subscriptlist": - nodes = node.children[::2] # skip the commas - else: - nodes = [node] - del node - - nodes = [_fix_forward_reference(context, node) for node in nodes] - type_name = typ.name.string_name - - # hacked in Union and Optional, since it's hard to do nicely in parsed code - if type_name in ("Union", '_Union'): - # In Python 3.6 it's still called typing.Union but it's an instance - # called _Union. - return ContextSet.from_sets(context.eval_node(node) for node in nodes) - if type_name in ("Optional", '_Optional'): - # Here we have the same issue like in Union. Therefore we also need to - # check for the instance typing._Optional (Python 3.6). 
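For reference, the two regexes above are how this (now removed) module recognized Python 2-style type comments; the same functionality moves to `jedi.evaluate.gradual.annotation`. A minimal standalone demo, using both regexes verbatim (the `comment` value is made up):

    import re

    comment = '# type: (int, str) -> bool'

    param_match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
    return_match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)

    print(param_match.group(1))   # 'int, str' -> split into per-param declarations
    print(return_match.group(1))  # 'bool' -> evaluated like a forward reference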
- return context.eval_node(nodes[0]) - - module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar) - typing = ModuleContext( - context.evaluator, - module_node=module_node, - path=None, - code_lines=code_lines, - ) - factories = typing.py__getattribute__("factory") - assert len(factories) == 1 - factory = list(factories)[0] - assert factory - function_body_nodes = factory.tree_node.children[4].children - valid_classnames = set(child.name.value - for child in function_body_nodes - if isinstance(child, tree.Class)) - if type_name not in valid_classnames: - return None - compiled_classname = compiled.create_simple_object(context.evaluator, type_name) - - from jedi.evaluate.context.iterable import FakeSequence - args = FakeSequence( - context.evaluator, - u'tuple', - [LazyTreeContext(context, n) for n in nodes] - ) - - result = factory.execute_evaluated(compiled_classname, args) - return result - - -def find_type_from_comment_hint_for(context, node, name): - return _find_type_from_comment_hint(context, node, node.children[1], name) - - -def find_type_from_comment_hint_with(context, node, name): - assert len(node.children[1].children) == 3, \ - "Can only be here when children[1] is 'foo() as f'" - varlist = node.children[1].children[2] - return _find_type_from_comment_hint(context, node, varlist, name) - - -def find_type_from_comment_hint_assign(context, node, name): - return _find_type_from_comment_hint(context, node, node.children[0], name) - - -def _find_type_from_comment_hint(context, node, varlist, name): - index = None - if varlist.type in ("testlist_star_expr", "exprlist", "testlist"): - # something like "a, b = 1, 2" - index = 0 - for child in varlist.children: - if child == name: - break - if child.type == "operator": - continue - index += 1 - else: - return [] - - comment = parser_utils.get_following_comment_same_line(node) - if comment is None: - return [] - match = re.match(r"^#\s*type:\s*([^#]*)", comment) - if match is None: - return [] - return _evaluate_annotation_string(context, match.group(1).strip(), index) diff --git a/jedi/evaluate/recursion.py b/jedi/evaluate/recursion.py index 5be3f8be..f86dedab 100644 --- a/jedi/evaluate/recursion.py +++ b/jedi/evaluate/recursion.py @@ -65,7 +65,7 @@ def execution_allowed(evaluator, node): if node in pushed_nodes: debug.warning('catched stmt recursion: %s @%s', node, - node.start_pos) + getattr(node, 'start_pos', None)) yield False else: try: @@ -77,14 +77,14 @@ def execution_allowed(evaluator, node): def execution_recursion_decorator(default=NO_CONTEXTS): def decorator(func): - def wrapper(execution, **kwargs): - detector = execution.evaluator.execution_recursion_detector - allowed = detector.push_execution(execution) + def wrapper(self, **kwargs): + detector = self.evaluator.execution_recursion_detector + limit_reached = detector.push_execution(self) try: - if allowed: + if limit_reached: result = default else: - result = func(execution, **kwargs) + result = func(self, **kwargs) finally: detector.pop_execution() return result @@ -116,6 +116,7 @@ class ExecutionRecursionDetector(object): self._parent_execution_funcs.append(funcdef) module = execution.get_root_context() + if module == self._evaluator.builtins_module: # We have control over builtins so we know they are not recursing # like crazy. 
Therefore we just let them execute always, because @@ -123,16 +124,30 @@ class ExecutionRecursionDetector(object): return False if self._recursion_level > recursion_limit: + debug.warning('Recursion limit (%s) reached', recursion_limit) return True if self._execution_count >= total_function_execution_limit: + debug.warning('Function execution limit (%s) reached', total_function_execution_limit) return True self._execution_count += 1 if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: + if module.py__name__() in ('builtins', 'typing'): + return False + debug.warning( + 'Per function execution limit (%s) reached: %s', + per_function_execution_limit, + funcdef + ) return True self._funcdef_execution_counts[funcdef] += 1 if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: + debug.warning( + 'Per function recursion limit (%s) reached: %s', + per_function_recursion_limit, + funcdef + ) return True return False diff --git a/jedi/evaluate/signature.py b/jedi/evaluate/signature.py new file mode 100644 index 00000000..9b3fc2df --- /dev/null +++ b/jedi/evaluate/signature.py @@ -0,0 +1,58 @@ +from jedi.parser_utils import get_call_signature + + +class AbstractSignature(object): + def __init__(self, context, is_bound=False): + self.context = context + self.is_bound = is_bound + + @property + def name(self): + return self.context.name + + def annotation(self): + return None + + def to_string(self): + raise NotImplementedError + + def bind(self, context): + raise NotImplementedError + + def get_param_names(self): + param_names = self._function_context.get_param_names() + if self.is_bound: + return param_names[1:] + return param_names + + +class TreeSignature(AbstractSignature): + def __init__(self, context, function_context=None, is_bound=False): + super(TreeSignature, self).__init__(context, is_bound) + self._function_context = function_context or context + + def bind(self, context): + return TreeSignature(context, self._function_context, is_bound=True) + + def annotation(self): + return self._function_context.tree_node.annotation + + def to_string(self, normalize=False): + return get_call_signature( + self._function_context.tree_node, + call_string=self.name.string_name, + omit_first_param=self.is_bound, + omit_return_annotation=self.context.is_class(), + ) + + +class BuiltinSignature(AbstractSignature): + @property + def _function_context(self): + return self.context + + def to_string(self): + return '' + + def bind(self, context): + raise NotImplementedError('pls implement, need test case, %s' % context) diff --git a/jedi/evaluate/stdlib.py b/jedi/evaluate/stdlib.py deleted file mode 100644 index 06296d93..00000000 --- a/jedi/evaluate/stdlib.py +++ /dev/null @@ -1,343 +0,0 @@ -""" -Implementations of standard library functions, because it's not possible to -understand them with Jedi. - -To add a new implementation, create a function and add it to the -``_implemented`` dict at the bottom of this module. - -Note that this module exists only to implement very specific functionality in -the standard library. The usual way to understand the standard library is the -compiled module that returns the types for C-builtins. 
-""" -import re - -import parso - -from jedi._compatibility import force_unicode -from jedi import debug -from jedi.evaluate.arguments import ValuesArguments -from jedi.evaluate import analysis -from jedi.evaluate import compiled -from jedi.evaluate.context.instance import InstanceFunctionExecution, \ - AbstractInstanceContext, CompiledInstance, BoundMethod, \ - AnonymousInstanceFunctionExecution -from jedi.evaluate.base_context import ContextualizedNode, \ - NO_CONTEXTS, ContextSet -from jedi.evaluate.context import ClassContext, ModuleContext -from jedi.evaluate.context import iterable -from jedi.evaluate.lazy_context import LazyTreeContext -from jedi.evaluate.syntax_tree import is_string - -# Now this is all part of fake tuples in Jedi. However super doesn't work on -# __init__ and __new__ doesn't work at all. So adding this to nametuples is -# just the easiest way. -_NAMEDTUPLE_INIT = """ - def __init__(_cls, {arg_list}): - 'A helper function for namedtuple.' - self.__iterable = ({arg_list}) - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - -""" - - -class NotInStdLib(LookupError): - pass - - -def execute(evaluator, obj, arguments): - if isinstance(obj, BoundMethod): - raise NotInStdLib() - - try: - obj_name = obj.name.string_name - except AttributeError: - pass - else: - if obj.parent_context == evaluator.builtins_module: - module_name = 'builtins' - elif isinstance(obj.parent_context, ModuleContext): - module_name = obj.parent_context.name.string_name - else: - module_name = '' - - # for now we just support builtin functions. - try: - func = _implemented[module_name][obj_name] - except KeyError: - pass - else: - return func(evaluator, obj, arguments) - raise NotInStdLib() - - -def _follow_param(evaluator, arguments, index): - try: - key, lazy_context = list(arguments.unpack())[index] - except IndexError: - return NO_CONTEXTS - else: - return lazy_context.infer() - - -def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False): - """ - Works like Argument Clinic (PEP 436), to validate function params. - """ - clinic_args = [] - allow_kwargs = False - optional = False - while string: - # Optional arguments have to begin with a bracket. And should always be - # at the end of the arguments. This is therefore not a proper argument - # clinic implementation. `range()` for exmple allows an optional start - # value at the beginning. - match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string) - string = string[len(match.group(0)):] - if not match.group(2): # A slash -> allow named arguments - allow_kwargs = True - continue - optional = optional or bool(match.group(1)) - word = match.group(2) - clinic_args.append((word, optional, allow_kwargs)) - - def f(func): - def wrapper(evaluator, obj, arguments): - debug.dbg('builtin start %s' % obj, color='MAGENTA') - result = NO_CONTEXTS - try: - lst = list(arguments.eval_argument_clinic(clinic_args)) - except ValueError: - pass - else: - kwargs = {} - if want_context: - kwargs['context'] = arguments.context - if want_obj: - kwargs['obj'] = obj - if want_arguments: - kwargs['arguments'] = arguments - result = func(evaluator, *lst, **kwargs) - finally: - debug.dbg('builtin end: %s', result, color='MAGENTA') - return result - - return wrapper - return f - - -@argument_clinic('iterator[, default], /') -def builtins_next(evaluator, iterators, defaults): - """ - TODO this function is currently not used. 
It's a stab at implementing next - in a different way than fake objects. This would be a bit more flexible. - """ - if evaluator.environment.version_info.major == 2: - name = 'next' - else: - name = '__next__' - - context_set = NO_CONTEXTS - for iterator in iterators: - if isinstance(iterator, AbstractInstanceContext): - context_set = ContextSet.from_sets( - n.infer() - for filter in iterator.get_filters(include_self_names=True) - for n in filter.get(name) - ).execute_evaluated() - if context_set: - return context_set - return defaults - - -@argument_clinic('object, name[, default], /') -def builtins_getattr(evaluator, objects, names, defaults=None): - # follow the first param - for obj in objects: - for name in names: - if is_string(name): - return obj.py__getattribute__(force_unicode(name.get_safe_value())) - else: - debug.warning('getattr called without str') - continue - return NO_CONTEXTS - - -@argument_clinic('object[, bases, dict], /') -def builtins_type(evaluator, objects, bases, dicts): - if bases or dicts: - # It's a type creation... maybe someday... - return NO_CONTEXTS - else: - return objects.py__class__() - - -class SuperInstance(AbstractInstanceContext): - """To be used like the object ``super`` returns.""" - def __init__(self, evaluator, cls): - su = cls.py_mro()[1] - super().__init__(evaluator, su and su[0] or self) - - -@argument_clinic('[type[, obj]], /', want_context=True) -def builtins_super(evaluator, types, objects, context): - # TODO make this able to detect multiple inheritance super - if isinstance(context, (InstanceFunctionExecution, - AnonymousInstanceFunctionExecution)): - su = context.instance.py__class__().py__bases__() - return su[0].infer().execute_evaluated() - return NO_CONTEXTS - - -@argument_clinic('sequence, /', want_obj=True, want_arguments=True) -def builtins_reversed(evaluator, sequences, obj, arguments): - # While we could do without this variable (just by using sequences), we - # want static analysis to work well. Therefore we need to generated the - # values again. - key, lazy_context = next(arguments.unpack()) - cn = None - if isinstance(lazy_context, LazyTreeContext): - # TODO access private - cn = ContextualizedNode(lazy_context._context, lazy_context.data) - ordered = list(sequences.iterate(cn)) - - rev = list(reversed(ordered)) - # Repack iterator values and then run it the normal way. This is - # necessary, because `reversed` is a function and autocompletion - # would fail in certain cases like `reversed(x).__iter__` if we - # just returned the result directly. - seq = iterable.FakeSequence(evaluator, u'list', rev) - arguments = ValuesArguments([ContextSet(seq)]) - return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments)) - - -@argument_clinic('obj, type, /', want_arguments=True) -def builtins_isinstance(evaluator, objects, types, arguments): - bool_results = set() - for o in objects: - cls = o.py__class__() - try: - mro_func = cls.py__mro__ - except AttributeError: - # This is temporary. Everything should have a class attribute in - # Python?! Maybe we'll leave it here, because some numpy objects or - # whatever might not. - bool_results = set([True, False]) - break - - mro = mro_func() - - for cls_or_tup in types: - if cls_or_tup.is_class(): - bool_results.add(cls_or_tup in mro) - elif cls_or_tup.name.string_name == 'tuple' \ - and cls_or_tup.get_root_context() == evaluator.builtins_module: - # Check for tuples. 
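The rule this branch implements is just `isinstance()`'s own: the check succeeds if any class in the tuple appears in the instance's MRO. A plain-Python sketch of the same logic (not Jedi code; `Base` and `Sub` are invented for the demo):

    class Base(object):
        pass

    class Sub(Base):
        pass

    obj = Sub()
    mro = type(obj).__mro__                        # (Sub, Base, object)
    print(Base in mro)                             # True, like bool_results.add(cls in mro)
    print(any(cls in mro for cls in (Base, str)))  # True: the tuple case above
    print(isinstance(obj, (Base, str)))            # CPython agrees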
- classes = ContextSet.from_sets( - lazy_context.infer() - for lazy_context in cls_or_tup.iterate() - ) - bool_results.add(any(cls in mro for cls in classes)) - else: - _, lazy_context = list(arguments.unpack())[1] - if isinstance(lazy_context, LazyTreeContext): - node = lazy_context.data - message = 'TypeError: isinstance() arg 2 must be a ' \ - 'class, type, or tuple of classes and types, ' \ - 'not %s.' % cls_or_tup - analysis.add(lazy_context._context, 'type-error-isinstance', node, message) - - return ContextSet.from_iterable( - compiled.builtin_from_name(evaluator, force_unicode(str(b))) - for b in bool_results - ) - - -def collections_namedtuple(evaluator, obj, arguments): - """ - Implementation of the namedtuple function. - - This has to be done by processing the namedtuple class template and - evaluating the result. - - """ - collections_context = obj.parent_context - _class_template_set = collections_context.py__getattribute__(u'_class_template') - if not _class_template_set: - # Namedtuples are not supported on Python 2.6, early 2.7, because the - # _class_template variable is not defined, there. - return NO_CONTEXTS - - # Process arguments - # TODO here we only use one of the types, we should use all. - # TODO this is buggy, doesn't need to be a string - name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value() - _fields = list(_follow_param(evaluator, arguments, 1))[0] - if isinstance(_fields, compiled.CompiledObject): - fields = _fields.get_safe_value().replace(',', ' ').split() - elif isinstance(_fields, iterable.Sequence): - fields = [ - v.get_safe_value() - for lazy_context in _fields.py__iter__() - for v in lazy_context.infer() if is_string(v) - ] - else: - return NO_CONTEXTS - - def get_var(name): - x, = collections_context.py__getattribute__(name) - return x.get_safe_value() - - base = next(iter(_class_template_set)).get_safe_value() - base += _NAMEDTUPLE_INIT - # Build source code - code = base.format( - typename=name, - field_names=tuple(fields), - num_fields=len(fields), - arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1], - repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields), - field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name) - for index, name in enumerate(fields)) - ) - - # Parse source code - module = evaluator.grammar.parse(code) - generated_class = next(module.iter_classdefs()) - parent_context = ModuleContext( - evaluator, module, None, - code_lines=parso.split_lines(code, keepends=True), - ) - return ContextSet(ClassContext(evaluator, parent_context, generated_class)) - - -@argument_clinic('first, /') -def _return_first_param(evaluator, firsts): - return firsts - - -_implemented = { - 'builtins': { - 'getattr': builtins_getattr, - 'type': builtins_type, - 'super': builtins_super, - 'reversed': builtins_reversed, - 'isinstance': builtins_isinstance, - }, - 'copy': { - 'copy': _return_first_param, - 'deepcopy': _return_first_param, - }, - 'json': { - 'load': lambda *args: NO_CONTEXTS, - 'loads': lambda *args: NO_CONTEXTS, - }, - 'collections': { - 'namedtuple': collections_namedtuple, - }, -} diff --git a/jedi/evaluate/syntax_tree.py b/jedi/evaluate/syntax_tree.py index 95ccf219..d3c5d8ee 100644 --- a/jedi/evaluate/syntax_tree.py +++ b/jedi/evaluate/syntax_tree.py @@ -10,20 +10,22 @@ from jedi import debug from jedi import parser_utils from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \ ContextualizedName, iterator_to_context_set, 
iterate_contexts +from jedi.evaluate.lazy_context import LazyTreeContext from jedi.evaluate import compiled -from jedi.evaluate import pep0484 from jedi.evaluate import recursion from jedi.evaluate import helpers from jedi.evaluate import analysis from jedi.evaluate import imports from jedi.evaluate import arguments -from jedi.evaluate.pep0484 import _evaluate_for_annotation from jedi.evaluate.context import ClassContext, FunctionContext from jedi.evaluate.context import iterable -from jedi.evaluate.context import TreeInstance, CompiledInstance +from jedi.evaluate.context import TreeInstance from jedi.evaluate.finder import NameFinder -from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled +from jedi.evaluate.helpers import is_string, is_literal, is_number from jedi.evaluate.compiled.access import COMPARISON_OPERATORS +from jedi.evaluate.cache import evaluator_method_cache +from jedi.evaluate.gradual.stub_context import VersionInfo +from jedi.evaluate.gradual import annotation def _limit_context_infers(func): @@ -51,7 +53,7 @@ def _limit_context_infers(func): def _py__stop_iteration_returns(generators): - results = ContextSet() + results = NO_CONTEXTS for generator in generators: try: method = generator.py__stop_iteration_returns @@ -65,13 +67,13 @@ def _py__stop_iteration_returns(generators): @debug.increase_indent @_limit_context_infers def eval_node(context, element): - debug.dbg('eval_node %s@%s', element, element.start_pos) + debug.dbg('eval_node %s@%s in %s', element, element.start_pos, context) evaluator = context.evaluator typ = element.type - if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'): + if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'): return eval_atom(context, element) elif typ == 'lambdef': - return ContextSet(FunctionContext(evaluator, context, element)) + return ContextSet([FunctionContext.from_context(context, element)]) elif typ == 'expr_stmt': return eval_expr_stmt(context, element) elif typ in ('power', 'atom_expr'): @@ -82,10 +84,10 @@ def eval_node(context, element): had_await = True first_child = children.pop(0) - context_set = eval_atom(context, first_child) - for trailer in children: + context_set = context.eval_node(first_child) + for (i, trailer) in enumerate(children): if trailer == '**': # has a power operation. - right = context.eval_node(children[1]) + right = context.eval_node(children[i + 1]) context_set = _eval_comparison( evaluator, context, @@ -100,12 +102,11 @@ def eval_node(context, element): await_context_set = context_set.py__getattribute__(u"__await__") if not await_context_set: debug.warning('Tried to run py__await__ on context %s', context) - context_set = ContextSet() - return _py__stop_iteration_returns(await_context_set.execute_evaluated()) + return await_context_set.execute_evaluated().py__stop_iteration_returns() return context_set elif typ in ('testlist_star_expr', 'testlist',): # The implicit tuple in statements. 
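`eval_node()` dispatches purely on parso node types such as `testlist_star_expr`. A small sketch of what those types look like; parso is already a Jedi dependency, but exact tree shapes can vary between parso versions, so the expected values in the comments are indicative only:

    import parso

    module = parso.parse('a, b = x ** 2, not y\n')
    expr_stmt = module.children[0].children[0]
    print(expr_stmt.type)              # 'expr_stmt'
    print(expr_stmt.children[0].type)  # 'testlist_star_expr' (the implicit tuple)
    for child in expr_stmt.children[2].children:
        print(child.type)              # e.g. 'power', 'operator', 'not_test'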
- return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element)) + return ContextSet([iterable.SequenceLiteralContext(evaluator, context, element)]) elif typ in ('not_test', 'factor'): context_set = context.eval_node(element.children[-1]) for operator in element.children[:-1]: @@ -122,7 +123,7 @@ def eval_node(context, element): if element.value not in ('.', '...'): origin = element.parent raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin)) - return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis')) + return ContextSet([compiled.builtin_from_name(evaluator, u'Ellipsis')]) elif typ == 'dotted_name': context_set = eval_atom(context, element.children[0]) for next_name in element.children[2::2]: @@ -132,13 +133,15 @@ def eval_node(context, element): elif typ == 'eval_input': return eval_node(context, element.children[0]) elif typ == 'annassign': - return pep0484._evaluate_for_annotation(context, element.children[1]) + return annotation.eval_annotation(context, element.children[1]) \ + .execute_annotation() elif typ == 'yield_expr': if len(element.children) and element.children[1].type == 'yield_arg': # Implies that it's a yield from. element = element.children[1].children[1] - generators = context.eval_node(element) - return _py__stop_iteration_returns(generators) + generators = context.eval_node(element) \ + .py__getattribute__('__iter__').execute_evaluated() + return generators.py__stop_iteration_returns() # Generator.send() is not implemented. return NO_CONTEXTS @@ -153,20 +156,7 @@ def eval_trailer(context, base_contexts, trailer): if trailer_op == '[': trailer_op, node, _ = trailer.children - - # TODO It's kind of stupid to cast this from a context set to a set. - foo = set(base_contexts) - # special case: PEP0484 typing module, see - # https://github.com/davidhalter/jedi/issues/663 - result = ContextSet() - for typ in list(foo): - if isinstance(typ, (ClassContext, TreeInstance)): - typing_module_types = pep0484.py__getitem__(context, typ, node) - if typing_module_types is not None: - foo.remove(typ) - result |= typing_module_types - - return result | base_contexts.get_item( + return base_contexts.get_item( eval_subscript_list(context.evaluator, context, node), ContextualizedNode(context, trailer) ) @@ -190,21 +180,33 @@ def eval_atom(context, atom): might be a name or a literal as well. """ if atom.type == 'name': + if atom.value in ('True', 'False', 'None'): + # Python 2... + return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)]) + # This is the first global lookup. stmt = tree.search_ancestor( atom, 'expr_stmt', 'lambdef' ) or atom if stmt.type == 'lambdef': stmt = atom + position = stmt.start_pos + if _is_annotation_name(atom): + # Since Python 3.7 (with from __future__ import annotations), + # annotations are essentially strings and can reference objects + # that are defined further down in code. Therefore just set the + # position to None, so the finder will not try to stop at a certain + # position in the module. + position = None return context.py__getattribute__( name_or_str=atom, - position=stmt.start_pos, + position=position, search_global=True ) elif atom.type == 'keyword': # For False/True/None if atom.value in ('False', 'True', 'None'): - return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value)) + return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)]) elif atom.value == 'print': # print e.g. 
could be evaluated like this in Python 2.7 return NO_CONTEXTS @@ -216,7 +218,7 @@ def eval_atom(context, atom): elif isinstance(atom, tree.Literal): string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value) - return ContextSet(compiled.create_simple_object(context.evaluator, string)) + return ContextSet([compiled.create_simple_object(context.evaluator, string)]) elif atom.type == 'strings': # Will be multiple string. context_set = eval_atom(context, atom.children[0]) @@ -224,6 +226,8 @@ def eval_atom(context, atom): right = eval_atom(context, string) context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right) return context_set + elif atom.type == 'fstring': + return compiled.get_string_context_set(context.evaluator) else: c = atom.children # Parentheses without commas are not tuples. @@ -245,9 +249,9 @@ def eval_atom(context, atom): pass if comp_for.type == 'comp_for': - return ContextSet(iterable.comprehension_from_atom( + return ContextSet([iterable.comprehension_from_atom( context.evaluator, context, atom - )) + )]) # It's a dict/list/tuple literal. array_node = c[1] @@ -255,11 +259,12 @@ def eval_atom(context, atom): array_node_c = array_node.children except AttributeError: array_node_c = [] - if c[0] == '{' and (array_node == '}' or ':' in array_node_c): + if c[0] == '{' and (array_node == '}' or ':' in array_node_c or + '**' in array_node_c): context = iterable.DictLiteralContext(context.evaluator, context, atom) else: context = iterable.SequenceLiteralContext(context.evaluator, context, atom) - return ContextSet(context) + return ContextSet([context]) @_limit_context_infers @@ -270,7 +275,7 @@ def eval_expr_stmt(context, stmt, seek_name=None): # necessary. if not allowed and context.get_root_context() == context.evaluator.builtins_module: try: - instance = context.instance + instance = context.var_args.instance except AttributeError: pass else: @@ -385,9 +390,9 @@ def _literals_to_types(evaluator, result): # Literals are only valid as long as the operations are # correct. Otherwise add a value-free instance. 
cls = compiled.builtin_from_name(evaluator, typ.name.string_name) - new_result |= cls.execute_evaluated() + new_result |= helpers.execute_evaluated(cls) else: - new_result |= ContextSet(typ) + new_result |= ContextSet([typ]) return new_result @@ -410,6 +415,22 @@ def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts ) +def _is_annotation_name(name): + ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt') + if ancestor is None: + return False + + if ancestor.type in ('param', 'funcdef'): + ann = ancestor.annotation + if ann is not None: + return ann.start_pos <= name.start_pos < ann.end_pos + elif ancestor.type == 'expr_stmt': + c = ancestor.children + if len(c) > 1 and c[1].type == 'annassign': + return c[1].start_pos <= name.start_pos < c[1].end_pos + return False + + def _is_tuple(context): return isinstance(context, iterable.Sequence) and context.array_type == 'tuple' @@ -422,6 +443,23 @@ def _bool_to_context(evaluator, bool_): return compiled.builtin_from_name(evaluator, force_unicode(str(bool_))) +def _get_tuple_ints(context): + if not isinstance(context, iterable.SequenceLiteralContext): + return None + numbers = [] + for lazy_context in context.py__iter__(): + if not isinstance(lazy_context, LazyTreeContext): + return None + node = lazy_context.data + if node.type != 'number': + return None + try: + numbers.append(int(node.value)) + except ValueError: + return None + return numbers + + def _eval_comparison_part(evaluator, context, left, operator, right): l_is_num = is_number(left) r_is_num = is_number(right) @@ -433,26 +471,26 @@ def _eval_comparison_part(evaluator, context, left, operator, right): if str_operator == '*': # for iterables, ignore * operations if isinstance(left, iterable.Sequence) or is_string(left): - return ContextSet(left) + return ContextSet([left]) elif isinstance(right, iterable.Sequence) or is_string(right): - return ContextSet(right) + return ContextSet([right]) elif str_operator == '+': if l_is_num and r_is_num or is_string(left) and is_string(right): - return ContextSet(left.execute_operation(right, str_operator)) + return ContextSet([left.execute_operation(right, str_operator)]) elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): - return ContextSet(iterable.MergedArray(evaluator, (left, right))) + return ContextSet([iterable.MergedArray(evaluator, (left, right))]) elif str_operator == '-': if l_is_num and r_is_num: - return ContextSet(left.execute_operation(right, str_operator)) + return ContextSet([left.execute_operation(right, str_operator)]) elif str_operator == '%': # With strings and numbers the left type typically remains. Except for # `int() % float()`. - return ContextSet(left) + return ContextSet([left]) elif str_operator in COMPARISON_OPERATORS: - if is_compiled(left) and is_compiled(right): + if left.is_compiled() and right.is_compiled(): # Possible, because the return is not an option. Just compare. try: - return ContextSet(left.execute_operation(right, str_operator)) + return ContextSet([left.execute_operation(right, str_operator)]) except TypeError: # Could be True or False. 
pass @@ -460,15 +498,24 @@ def _eval_comparison_part(evaluator, context, left, operator, right): if str_operator in ('is', '!=', '==', 'is not'): operation = COMPARISON_OPERATORS[str_operator] bool_ = operation(left, right) - return ContextSet(_bool_to_context(evaluator, bool_)) + return ContextSet([_bool_to_context(evaluator, bool_)]) - return ContextSet(_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)) + if isinstance(left, VersionInfo): + version_info = _get_tuple_ints(right) + if version_info is not None: + bool_result = compiled.access.COMPARISON_OPERATORS[operator]( + evaluator.environment.version_info, + tuple(version_info) + ) + return ContextSet([_bool_to_context(evaluator, bool_result)]) + + return ContextSet([_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)]) elif str_operator == 'in': return NO_CONTEXTS def check(obj): """Checks if a Jedi object is either a float or an int.""" - return isinstance(obj, CompiledInstance) and \ + return isinstance(obj, TreeInstance) and \ obj.name.string_name in ('int', 'float') # Static analysis, one is a number, the other one is not. @@ -478,7 +525,9 @@ def _eval_comparison_part(evaluator, context, left, operator, right): analysis.add(context, 'type-error-operation', operator, message % (left, right)) - return ContextSet(left, right) + result = ContextSet([left, right]) + debug.dbg('Used operator %s resulting in %s', operator, result) + return result def _remove_statements(evaluator, context, stmt, name): @@ -489,7 +538,7 @@ def _remove_statements(evaluator, context, stmt, name): evaluated. """ pep0484_contexts = \ - pep0484.find_type_from_comment_hint_assign(context, stmt, name) + annotation.find_type_from_comment_hint_assign(context, stmt, name) if pep0484_contexts: return pep0484_contexts @@ -497,21 +546,22 @@ def _remove_statements(evaluator, context, stmt, name): def tree_name_to_contexts(evaluator, context, tree_name): - - context_set = ContextSet() + context_set = NO_CONTEXTS module_node = context.get_root_context().tree_node + # First check for annotations, like: `foo: int = 3` if module_node is not None: names = module_node.get_used_names().get(tree_name.value, []) for name in names: expr_stmt = name.parent - correct_scope = parser_utils.get_parent_scope(name) == context.tree_node - - if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign" and correct_scope: - context_set |= _evaluate_for_annotation(context, expr_stmt.children[1].children[1]) - - if context_set: - return context_set + if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign": + correct_scope = parser_utils.get_parent_scope(name) == context.tree_node + if correct_scope: + context_set |= annotation.eval_annotation( + context, expr_stmt.children[1].children[1] + ).execute_annotation() + if context_set: + return context_set types = [] node = tree_name.get_definition(import_name_always=True) @@ -526,15 +576,16 @@ def tree_name_to_contexts(evaluator, context, tree_name): filters = [next(filters)] return finder.find(filters, attribute_lookup=False) elif node.type not in ('import_from', 'import_name'): - raise ValueError("Should not happen. 
type: %s", node.type)
+        context = evaluator.create_context(context, tree_name)
+        return eval_atom(context, tree_name)
     typ = node.type
     if typ == 'for_stmt':
-        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
+        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
         if types:
             return types
     if typ == 'with_stmt':
-        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
+        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
         if types:
             return types
@@ -566,11 +617,16 @@ def tree_name_to_contexts(evaluator, context, tree_name):
         # the static analysis report.
         exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
         types = exceptions.execute_evaluated()
+    elif node.type == 'param':
+        types = NO_CONTEXTS
     else:
         raise ValueError("Should not happen. type: %s" % typ)
     return types


+# We don't want multiple functions/classes to be created from the same
+# tree_node.
+@evaluator_method_cache()
 def _apply_decorators(context, node):
     """
     Returns the function that should be executed in the end.
     """
     if node.type == 'classdef':
         decoratee_context = ClassContext(
             context.evaluator,
             parent_context=context,
-            classdef=node
+            tree_node=node
         )
     else:
-        decoratee_context = FunctionContext(
-            context.evaluator,
-            parent_context=context,
-            funcdef=node
-        )
-    initial = values = ContextSet(decoratee_context)
+        decoratee_context = FunctionContext.from_context(context, node)
+    initial = values = ContextSet([decoratee_context])
     for dec in reversed(node.get_decorators()):
         debug.dbg('decorator: %s %s', dec, values)
         dec_values = context.eval_node(dec.children[1])
@@ -600,7 +652,12 @@ def _apply_decorators(context, node):
             dec_values = eval_trailer(context, dec_values, trailer)

         if not len(dec_values):
-            debug.warning('decorator not found: %s on %s', dec, node)
+            code = dec.get_code(include_prefix=False)
+            # For the near future, we don't want to hear about the runtime
+            # decorator in typing that was intentionally omitted. This is not
+            # "correct", but helps with debugging.
+            if code != '@runtime\n':
+                debug.warning('decorator not found: %s on %s', dec, node)
             return initial

         values = dec_values.execute(arguments.ValuesArguments([values]))
@@ -620,6 +677,9 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
     for index, node in contextualized_name.assignment_indexes():
         cn = ContextualizedNode(contextualized_name.context, node)
         iterated = context_set.iterate(cn)
+        if isinstance(index, slice):
+            # For now, star unpacking is not possible.
+            return NO_CONTEXTS
         for _ in range(index + 1):
             try:
                 lazy_context = next(iterated)
@@ -628,7 +688,7 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
                 # would allow this loop to run for a very long time if the
                 # index number is high. Therefore break if the loop is
                 # finished.
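The new `isinstance(index, slice)` guard in `check_tuple_assignments()` exists because a starred assignment target does not sit at a single integer index; it stands for a slice of the iterated values. A plain-Python demo of the semantics being skipped (not Jedi code):

    seq = [1, 2, 3, 4, 5]
    a, *b, c = seq
    print(a, b, c)                 # 1 [2, 3, 4] 5
    print(b == seq[slice(1, -1)])  # True: the starred name stands for a slice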
- return ContextSet() + return NO_CONTEXTS context_set = lazy_context.infer() return context_set @@ -639,7 +699,7 @@ def eval_subscript_list(evaluator, context, index): """ if index == ':': # Like array[:] - return ContextSet(iterable.Slice(context, None, None, None)) + return ContextSet([iterable.Slice(context, None, None, None)]) elif index.type == 'subscript' and not index.children[0] == '.': # subscript basically implies a slice operation, except for Python 2's @@ -657,9 +717,9 @@ def eval_subscript_list(evaluator, context, index): result.append(el) result += [None] * (3 - len(result)) - return ContextSet(iterable.Slice(context, *result)) + return ContextSet([iterable.Slice(context, *result)]) elif index.type == 'subscriptlist': - return NO_CONTEXTS + return ContextSet([iterable.SequenceLiteralContext(evaluator, context, index)]) # No slices return context.eval_node(index) diff --git a/jedi/evaluate/sys_path.py b/jedi/evaluate/sys_path.py index d765a665..4cbef176 100644 --- a/jedi/evaluate/sys_path.py +++ b/jedi/evaluate/sys_path.py @@ -99,7 +99,7 @@ def check_sys_path_modifications(module_context): def get_sys_path_powers(names): for name in names: power = name.parent.parent - if power.type in ('power', 'atom_expr'): + if power is not None and power.type in ('power', 'atom_expr'): c = power.children if c[0].type == 'name' and c[0].value == 'sys' \ and c[1].type == 'trailer': @@ -153,6 +153,7 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path): from jedi.evaluate.context import ModuleContext module = ModuleContext( evaluator, module_node, buildout_script_path, + string_names=None, code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path), ) for path in check_sys_path_modifications(module): @@ -196,31 +197,63 @@ def _get_buildout_script_paths(search_path): continue -def dotted_path_in_sys_path(sys_path, module_path): +def remove_python_path_suffix(path): + for suffix in all_suffixes(): + if path.endswith(suffix): + path = path[:-len(suffix)] + break + return path + + +def transform_path_to_dotted(sys_path, module_path): """ - Returns the dotted path inside a sys.path. + Returns the dotted path inside a sys.path as a list of names. e.g. + + >>> from os.path import abspath + >>> transform_path_to_dotted([abspath("/foo")], abspath('/foo/bar/baz.py')) + (('bar', 'baz'), False) + + Returns (None, False) if the path doesn't really resolve to anything. + The second return part is if it is a package. """ # First remove the suffix. - for suffix in all_suffixes(): - if module_path.endswith(suffix): - module_path = module_path[:-len(suffix)] - break - else: - # There should always be a suffix in a valid Python file on the path. - return None + module_path = remove_python_path_suffix(module_path) - if module_path.startswith(os.path.sep): - # The paths in sys.path most of the times don't end with a slash. - module_path = module_path[1:] + # Once the suffix was removed we are using the files as we know them. This + # means that if someone uses an ending like .vim for a Python file, .vim + # will be part of the returned dotted part. - for p in sys_path: - if module_path.startswith(p): - rest = module_path[len(p):] - if rest: - split = rest.split(os.path.sep) - for string in split: - if not string or '.' 
in string:
-                        return None
-                return '.'.join(split)
+    is_package = module_path.endswith(os.path.sep + '__init__')
+    if is_package:
+        # -1 to remove the separator
+        module_path = module_path[:-len('__init__') - 1]

-    return None
+    def iter_potential_solutions():
+        for p in sys_path:
+            if module_path.startswith(p):
+                # Strip the trailing slash/backslash
+                rest = module_path[len(p):]
+                # On Windows a path can also use a slash.
+                if rest.startswith(os.path.sep) or rest.startswith('/'):
+                    # Remove a slash in cases it's still there.
+                    rest = rest[1:]
+
+                if rest:
+                    split = rest.split(os.path.sep)
+                    if not all(split):
+                        # This means that part of the file path was empty; this
+                        # is very strange and probably means the file is called
+                        # `.py`.
+                        return
+                    yield tuple(split)
+
+    potential_solutions = tuple(iter_potential_solutions())
+    if not potential_solutions:
+        return None, False
+    # Try to find the shortest path; this usually makes more sense, because
+    # the user typically has venvs somewhere. This means that a path like
+    # .tox/py37/lib/python3.7/os.py can be normal for a file. However, in that
+    # case we definitely want to return ['os'] as a path and not a crazy
+    # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a
+    # heuristic and there's no way to "always" do it right.
+    return sorted(potential_solutions, key=lambda p: len(p))[0], is_package
diff --git a/jedi/evaluate/usages.py b/jedi/evaluate/usages.py
index 290c4695..623caf4c 100644
--- a/jedi/evaluate/usages.py
+++ b/jedi/evaluate/usages.py
@@ -1,6 +1,5 @@
 from jedi.evaluate import imports
-from jedi.evaluate.filters import TreeNameDefinition
-from jedi.evaluate.context import ModuleContext
+from jedi.evaluate.names import TreeNameDefinition


 def _resolve_names(definition_names, avoid_names=()):
@@ -39,7 +38,7 @@ def usages(module_context, tree_name):
     search_name = tree_name.value
     found_names = _find_names(module_context, tree_name)
     modules = set(d.get_root_context() for d in found_names.values())
-    modules = set(m for m in modules if isinstance(m, ModuleContext))
+    modules = set(m for m in modules if m.is_module())

     non_matching_usage_maps = {}
     for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name):
diff --git a/jedi/evaluate/utils.py b/jedi/evaluate/utils.py
index e00e4774..990a995e 100644
--- a/jedi/evaluate/utils.py
+++ b/jedi/evaluate/utils.py
@@ -11,7 +11,7 @@ from jedi._compatibility import reraise
 _sep = os.path.sep
 if os.path.altsep is not None:
     _sep += os.path.altsep
-_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
+_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
 del _sep

@@ -117,38 +117,3 @@ def indent_block(text, indention='    '):
         text = text[:-1]
     lines = text.split('\n')
     return '\n'.join(map(lambda s: indention + s, lines)) + temp
-
-
-def dotted_from_fs_path(fs_path, sys_path):
-    """
-    Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
-    compares the path with sys.path and then returns the dotted_path. If the
-    path is not in the sys.path, just returns None.
-    """
-    if os.path.basename(fs_path).startswith('__init__.'):
-        # We are calculating the path. __init__ files are not interesting.
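A condensed, standalone sketch of `transform_path_to_dotted()` as rewritten above. Assumptions: POSIX-style paths, a hard-coded suffix list standing in for `importlib.machinery.all_suffixes()`, and skipping (rather than aborting on) candidates with empty path parts:

    import os

    def to_dotted(sys_path, module_path):
        for suffix in ('.py', '.pyi', '.so', '.pyd'):  # stand-in for all_suffixes()
            if module_path.endswith(suffix):
                module_path = module_path[:-len(suffix)]
                break
        is_package = module_path.endswith(os.path.sep + '__init__')
        if is_package:
            module_path = module_path[:-len('__init__') - 1]
        solutions = []
        for p in sys_path:
            if module_path.startswith(p):
                rest = module_path[len(p):].lstrip(os.path.sep)
                if rest and all(rest.split(os.path.sep)):
                    solutions.append(tuple(rest.split(os.path.sep)))
        if not solutions:
            return None, False
        return min(solutions, key=len), is_package  # shortest match wins

    print(to_dotted(['/foo'], '/foo/bar/baz.py'))       # (('bar', 'baz'), False)
    print(to_dotted(['/foo'], '/foo/pkg/__init__.py'))  # (('pkg',), True)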
- fs_path = os.path.dirname(fs_path) - - # prefer - # - UNIX - # /path/to/pythonX.Y/lib-dynload - # /path/to/pythonX.Y/site-packages - # - Windows - # C:\path\to\DLLs - # C:\path\to\Lib\site-packages - # over - # - UNIX - # /path/to/pythonX.Y - # - Windows - # C:\path\to\Lib - path = '' - for s in sys_path: - if (fs_path.startswith(s) and len(path) < len(s)): - path = s - - # - Window - # X:\path\to\lib-dynload/datetime.pyd => datetime - module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/') - # - Window - # Replace like X:\path\to\something/foo/bar.py - return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.') diff --git a/jedi/parser_utils.py b/jedi/parser_utils.py index bb033ed0..aaa9ffd5 100644 --- a/jedi/parser_utils.py +++ b/jedi/parser_utils.py @@ -1,3 +1,4 @@ +import re import textwrap from inspect import cleandoc @@ -137,7 +138,8 @@ def safe_literal_eval(value): return '' -def get_call_signature(funcdef, width=72, call_string=None): +def get_call_signature(funcdef, width=72, call_string=None, + omit_first_param=False, omit_return_annotation=False): """ Generate call signature of this function. @@ -154,11 +156,13 @@ def get_call_signature(funcdef, width=72, call_string=None): call_string = '' else: call_string = funcdef.name.value - if funcdef.type == 'lambdef': - p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')' - else: - p = funcdef.children[2].get_code() - if funcdef.annotation: + params = funcdef.get_params() + if omit_first_param: + params = params[1:] + p = '(' + ''.join(param.get_code() for param in params).strip() + ')' + # TODO this is pretty bad, we should probably just normalize. + p = re.sub(r'\s+', ' ', p) + if funcdef.annotation and not omit_return_annotation: rtype = " ->" + funcdef.annotation.get_code() else: rtype = "" @@ -167,25 +171,6 @@ def get_call_signature(funcdef, width=72, call_string=None): return '\n'.join(textwrap.wrap(code, width)) -def get_doc_with_call_signature(scope_node): - """ - Return a document string including call signature. - """ - call_signature = None - if scope_node.type == 'classdef': - for funcdef in scope_node.iter_funcdefs(): - if funcdef.name.value == '__init__': - call_signature = \ - get_call_signature(funcdef, call_string=scope_node.name.value) - elif scope_node.type in ('funcdef', 'lambdef'): - call_signature = get_call_signature(scope_node) - - doc = clean_scope_docstring(scope_node) - if call_signature is None: - return doc - return '%s\n\n%s' % (call_signature, doc) - - def move(node, line_offset): """ Move the `Node` start_pos. @@ -239,11 +224,22 @@ def get_parent_scope(node, include_flows=False): Returns the underlying scope. """ scope = node.parent - while scope is not None: - if include_flows and isinstance(scope, tree.Flow): + if scope is None: + return None # It's a module already. 
+
+    while True:
+        if is_scope(scope) or include_flows and isinstance(scope, tree.Flow):
+            if scope.type in ('classdef', 'funcdef', 'lambdef'):
+                index = scope.children.index(':')
+                if scope.children[index].start_pos >= node.start_pos:
+                    if node.parent.type == 'param' and node.parent.name == node:
+                        pass
+                    elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
+                        pass
+                    else:
+                        scope = scope.parent
+                        continue
             return scope
-        if is_scope(scope):
-            break
         scope = scope.parent
     return scope
diff --git a/jedi/plugins/__init__.py b/jedi/plugins/__init__.py
new file mode 100644
index 00000000..3aa48aac
--- /dev/null
+++ b/jedi/plugins/__init__.py
@@ -0,0 +1,37 @@
+from jedi.plugins.stdlib import StdlibPlugin
+from jedi.plugins.flask import FlaskPlugin
+
+
+class _PluginManager(object):
+    def __init__(self, registered_plugin_classes=()):
+        self._registered_plugin_classes = list(registered_plugin_classes)
+
+    def register(self, plugin_class):
+        """
+        Makes it possible to register your plugin.
+        """
+        self._registered_plugin_classes.append(plugin_class)
+
+    def _build_chain(self, evaluator):
+        for plugin_class in self._registered_plugin_classes:
+            yield plugin_class(evaluator)
+
+    def get_callbacks(self, evaluator):
+        return _PluginCallbacks(self._build_chain(evaluator))
+
+
+class _PluginCallbacks(object):
+    def __init__(self, plugins):
+        self._plugins = list(plugins)
+
+    def decorate(self, name, callback):
+        for plugin in reversed(self._plugins):
+            # Need to reverse so the first plugin is run first.
+            callback = getattr(plugin, name)(callback)
+        return callback
+
+
+plugin_manager = _PluginManager([
+    StdlibPlugin,
+    FlaskPlugin,
+])
diff --git a/jedi/plugins/base.py b/jedi/plugins/base.py
new file mode 100644
index 00000000..370ed2a7
--- /dev/null
+++ b/jedi/plugins/base.py
@@ -0,0 +1,21 @@
+class BasePlugin(object):
+    """
+    Plugins are created each time an evaluator is created.
+    """
+    def __init__(self, evaluator):
+        # In __init__ you can do some caching.
+        self._evaluator = evaluator
+
+    def execute(self, callback):
+        """
+        Decorates the execute(context, arguments) function.
+        """
+        return callback
+
+    def import_module(self, callback):
+        """
+        Decorates the
+        import_module(evaluator, import_path, sys_path, add_error_callback)
+        function.
+        """
+        return callback
diff --git a/jedi/plugins/flask.py b/jedi/plugins/flask.py
new file mode 100644
index 00000000..de27fe47
--- /dev/null
+++ b/jedi/plugins/flask.py
@@ -0,0 +1,25 @@
+from jedi.plugins.base import BasePlugin
+
+
+class FlaskPlugin(BasePlugin):
+    def import_module(self, callback):
+        """
+        Handle "magic" Flask extension imports:
+        ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
+        """
+        def wrapper(evaluator, import_names, module_context, *args, **kwargs):
+            if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
+                # New style.
+ ipath = (u'flask_' + import_names[2]), + context_set = callback(evaluator, ipath, None, *args, **kwargs) + if context_set: + return context_set + context_set = callback(evaluator, (u'flaskext',), None, *args, **kwargs) + return callback( + evaluator, + (u'flaskext', import_names[2]), + next(iter(context_set)), + *args, **kwargs + ) + return callback(evaluator, import_names, module_context, *args, **kwargs) + return wrapper diff --git a/jedi/plugins/stdlib.py b/jedi/plugins/stdlib.py new file mode 100644 index 00000000..134585cc --- /dev/null +++ b/jedi/plugins/stdlib.py @@ -0,0 +1,573 @@ +""" +Implementations of standard library functions, because it's not possible to +understand them with Jedi. + +To add a new implementation, create a function and add it to the +``_implemented`` dict at the bottom of this module. + +Note that this module exists only to implement very specific functionality in +the standard library. The usual way to understand the standard library is the +compiled module that returns the types for C-builtins. +""" +import parso + +from jedi._compatibility import force_unicode +from jedi.plugins.base import BasePlugin +from jedi import debug +from jedi.evaluate.arguments import ValuesArguments, \ + repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper +from jedi.evaluate import analysis +from jedi.evaluate import compiled +from jedi.evaluate.context.instance import \ + AbstractInstanceContext, BoundMethod, InstanceArguments +from jedi.evaluate.base_context import ContextualizedNode, \ + NO_CONTEXTS, ContextSet, ContextWrapper +from jedi.evaluate.context import ClassContext, ModuleContext, \ + FunctionExecutionContext +from jedi.evaluate.context import iterable +from jedi.evaluate.lazy_context import LazyTreeContext, LazyKnownContext, \ + LazyKnownContexts +from jedi.evaluate.syntax_tree import is_string +from jedi.evaluate.filters import AbstractObjectOverwrite, publish_method + + +# Copied from Python 3.6's stdlib. +_NAMEDTUPLE_CLASS_TEMPLATE = """\ +_property = property +_tuple = tuple +from operator import itemgetter as _itemgetter +from collections import OrderedDict + +class {typename}(tuple): + '{typename}({arg_list})' + + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % list(kwds)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + '({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values.' + return OrderedDict(zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + # These methods were added by Jedi. + # __new__ doesn't really work with Jedi. So adding this to namedtuples seems + # like the easiest way. + def __init__(self, {arg_list}): + 'A helper function for namedtuple.'
+ self.__iterable = ({arg_list}) + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + +{field_defs} +""" + +_NAMEDTUPLE_FIELD_TEMPLATE = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + + +class StdlibPlugin(BasePlugin): + def execute(self, callback): + def wrapper(context, arguments): + debug.dbg('execute: %s %s', context, arguments) + try: + obj_name = context.name.string_name + except AttributeError: + pass + else: + if context.parent_context == self._evaluator.builtins_module: + module_name = 'builtins' + elif context.parent_context.is_module(): + module_name = context.parent_context.name.string_name + else: + return callback(context, arguments=arguments) + + if isinstance(context, BoundMethod): + if module_name == 'builtins': + if context.py__name__() == '__get__': + if context.class_context.py__name__() == 'property': + return builtins_property( + context, + arguments=arguments + ) + elif context.py__name__() in ('deleter', 'getter', 'setter'): + if context.class_context.py__name__() == 'property': + return ContextSet([context.instance]) + + return callback(context, arguments=arguments) + + # for now we just support builtin functions. + try: + func = _implemented[module_name][obj_name] + except KeyError: + pass + else: + return func(context, arguments=arguments) + return callback(context, arguments=arguments) + + return wrapper + + +def _follow_param(evaluator, arguments, index): + try: + key, lazy_context = list(arguments.unpack())[index] + except IndexError: + return NO_CONTEXTS + else: + return lazy_context.infer() + + +def argument_clinic(string, want_obj=False, want_context=False, + want_arguments=False, want_evaluator=False): + """ + Works like Argument Clinic (PEP 436), to validate function params. + """ + + def f(func): + @repack_with_argument_clinic(string, keep_arguments_param=True) + def wrapper(obj, *args, **kwargs): + arguments = kwargs.pop('arguments') + assert not kwargs # Python 2... + debug.dbg('builtin start %s' % obj, color='MAGENTA') + result = NO_CONTEXTS + if want_context: + kwargs['context'] = arguments.context + if want_obj: + kwargs['obj'] = obj + if want_evaluator: + kwargs['evaluator'] = obj.evaluator + if want_arguments: + kwargs['arguments'] = arguments + result = func(*args, **kwargs) + debug.dbg('builtin end: %s', result, color='MAGENTA') + return result + + return wrapper + return f + + +@argument_clinic('obj, type, /', want_obj=True, want_arguments=True) +def builtins_property(objects, types, obj, arguments): + property_args = obj.instance.var_args.unpack() + key, lazy_context = next(property_args, (None, None)) + if key is not None or lazy_context is None: + debug.warning('property expected a first param, not %s', arguments) + return NO_CONTEXTS + + return lazy_context.infer().py__call__(arguments=ValuesArguments([objects])) + + +@argument_clinic('iterator[, default], /', want_evaluator=True) +def builtins_next(iterators, defaults, evaluator): + if evaluator.environment.version_info.major == 2: + name = 'next' + else: + name = '__next__' + + # TODO theoretically we have to check here if something is an iterator. + # That is probably done by checking if it's not a class. + return defaults | iterators.py__getattribute__(name).execute_evaluated() + + +@argument_clinic('iterator[, default], /') +def builtins_iter(iterators_or_callables, defaults): + # TODO implement this if it's a callable. 
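For reference, the runtime behavior `builtins_next` approximates: the result is either an element produced by the iterator or, when given, the default, which is why the code above unions `defaults` with the inferred `__next__` results:

```python
it = iter([1, 2])
assert next(it) == 1               # an element of the iterator
assert next(iter([]), 'x') == 'x'  # or the default, hence the type union
```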
+ return iterators_or_callables.py__getattribute__('__iter__').execute_evaluated() + + +@argument_clinic('object, name[, default], /') +def builtins_getattr(objects, names, defaults=None): + # follow the first param + for obj in objects: + for name in names: + if is_string(name): + return obj.py__getattribute__(force_unicode(name.get_safe_value())) + else: + debug.warning('getattr called without str') + continue + return NO_CONTEXTS + + +@argument_clinic('object[, bases, dict], /') +def builtins_type(objects, bases, dicts): + if bases or dicts: + # It's a type creation... maybe someday... + return NO_CONTEXTS + else: + return objects.py__class__() + + +class SuperInstance(AbstractInstanceContext): + """To be used like the object ``super`` returns.""" + def __init__(self, evaluator, cls): + su = cls.py_mro()[1] + super().__init__(evaluator, su and su[0] or self) + + +@argument_clinic('[type[, obj]], /', want_context=True) +def builtins_super(types, objects, context): + # TODO make this able to detect multiple inheritance super + if isinstance(context, FunctionExecutionContext): + if isinstance(context.var_args, InstanceArguments): + su = context.var_args.instance.py__class__().py__bases__() + return su[0].infer().execute_evaluated() + + return NO_CONTEXTS + + +class ReversedObject(AbstractObjectOverwrite, ContextWrapper): + def __init__(self, reversed_obj, iter_list): + super(ReversedObject, self).__init__(reversed_obj) + self._iter_list = iter_list + + def get_object(self): + return self._wrapped_context + + @publish_method('__iter__') + def py__iter__(self, contextualized_node=None): + return self._iter_list + + @publish_method('next', python_version_match=2) + @publish_method('__next__', python_version_match=3) + def py__next__(self): + return ContextSet.from_sets( + lazy_context.infer() for lazy_context in self._iter_list + ) + + +@argument_clinic('sequence, /', want_obj=True, want_arguments=True) +def builtins_reversed(sequences, obj, arguments): + # While we could do without this variable (just by using sequences), we + # want static analysis to work well. Therefore we need to generate the + # values again. + key, lazy_context = next(arguments.unpack()) + cn = None + if isinstance(lazy_context, LazyTreeContext): + # TODO access private + cn = ContextualizedNode(lazy_context.context, lazy_context.data) + ordered = list(sequences.iterate(cn)) + + # Repack iterator values and then run it the normal way. This is + # necessary, because `reversed` is a function and autocompletion + # would fail in certain cases like `reversed(x).__iter__` if we + # just returned the result directly. + seq, = obj.evaluator.typing_module.py__getattribute__('Iterator').execute_evaluated() + return ContextSet([ReversedObject(seq, list(reversed(ordered)))]) + + +@argument_clinic('obj, type, /', want_arguments=True, want_evaluator=True) +def builtins_isinstance(objects, types, arguments, evaluator): + bool_results = set() + for o in objects: + cls = o.py__class__() + try: + cls.py__bases__ + except AttributeError: + # This is temporary. Everything should have a class attribute in + # Python?! Maybe we'll leave it here, because some numpy objects or + # whatever might not. + bool_results = set([True, False]) + break + + mro = list(cls.py__mro__()) + + for cls_or_tup in types: + if cls_or_tup.is_class(): + bool_results.add(cls_or_tup in mro) + elif cls_or_tup.name.string_name == 'tuple' \ + and cls_or_tup.get_root_context() == evaluator.builtins_module: + # Check for tuples.
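The branch below deals with the runtime rule that the second argument of `isinstance` may also be a tuple of classes, while anything else raises `TypeError` (the case the surrounding function reports as `type-error-isinstance`):

```python
assert isinstance(1, (str, int))         # a tuple matches if any member matches
assert not isinstance(1.0, (str, bytes))
try:
    isinstance(1, 'not a type')
except TypeError:
    pass  # arg 2 must be a class, type, or tuple of classes and types
```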
+ classes = ContextSet.from_sets( + lazy_context.infer() + for lazy_context in cls_or_tup.iterate() + ) + bool_results.add(any(cls in mro for cls in classes)) + else: + _, lazy_context = list(arguments.unpack())[1] + if isinstance(lazy_context, LazyTreeContext): + node = lazy_context.data + message = 'TypeError: isinstance() arg 2 must be a ' \ + 'class, type, or tuple of classes and types, ' \ + 'not %s.' % cls_or_tup + analysis.add(lazy_context.context, 'type-error-isinstance', node, message) + + return ContextSet( + compiled.builtin_from_name(evaluator, force_unicode(str(b))) + for b in bool_results + ) + + +class StaticMethodObject(AbstractObjectOverwrite, ContextWrapper): + def get_object(self): + return self._wrapped_context + + def py__get__(self, instance, klass): + return ContextSet([self._wrapped_context]) + + +@argument_clinic('sequence, /') +def builtins_staticmethod(functions): + return ContextSet(StaticMethodObject(f) for f in functions) + + +class ClassMethodObject(AbstractObjectOverwrite, ContextWrapper): + def __init__(self, class_method_obj, function): + super(ClassMethodObject, self).__init__(class_method_obj) + self._function = function + + def get_object(self): + return self._wrapped_context + + def py__get__(self, obj, class_context): + return ContextSet([ + ClassMethodGet(__get__, class_context, self._function) + for __get__ in self._wrapped_context.py__getattribute__('__get__') + ]) + + +class ClassMethodGet(AbstractObjectOverwrite, ContextWrapper): + def __init__(self, get_method, klass, function): + super(ClassMethodGet, self).__init__(get_method) + self._class = klass + self._function = function + + def get_object(self): + return self._wrapped_context + + def py__call__(self, arguments): + return self._function.execute(ClassMethodArguments(self._class, arguments)) + + +class ClassMethodArguments(TreeArgumentsWrapper): + def __init__(self, klass, arguments): + super(ClassMethodArguments, self).__init__(arguments) + self._class = klass + + def unpack(self, func=None): + yield None, LazyKnownContext(self._class) + for values in self._wrapped_arguments.unpack(func): + yield values + + +@argument_clinic('sequence, /', want_obj=True, want_arguments=True) +def builtins_classmethod(functions, obj, arguments): + return ContextSet( + ClassMethodObject(class_method_object, function) + for class_method_object in obj.py__call__(arguments=arguments) + for function in functions + ) + + +def collections_namedtuple(obj, arguments): + """ + Implementation of the namedtuple function. + + This has to be done by processing the namedtuple class template and + evaluating the result. + + """ + evaluator = obj.evaluator + + # Process arguments + # TODO here we only use one of the types, we should use all. 
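The template approach is easiest to see in miniature: format class source code from the field names and turn it into a class. Jedi parses the generated source with its own grammar instead of executing it; the sketch below uses a trimmed-down, hypothetical template and `exec` purely to show the idea:

```python
template = '''\
class {typename}(tuple):
    _fields = {field_names!r}
{field_defs}'''

field_def = "    {name} = property(lambda self: self[{index}])\n"

fields = ('x', 'y')
code = template.format(
    typename='Point',
    field_names=fields,
    field_defs=''.join(
        field_def.format(name=name, index=index)
        for index, name in enumerate(fields)
    ),
)
namespace = {}
exec(code, namespace)
Point = namespace['Point']
assert Point(('a', 'b')).x == 'a'  # plain tuple subclass, fields by index
```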
+ # TODO this is buggy, doesn't need to be a string + name = force_unicode(list(_follow_param(evaluator, arguments, 0))[0].get_safe_value()) + _fields = list(_follow_param(evaluator, arguments, 1))[0] + if isinstance(_fields, compiled.CompiledValue): + fields = force_unicode(_fields.get_safe_value()).replace(',', ' ').split() + elif isinstance(_fields, iterable.Sequence): + fields = [ + force_unicode(v.get_safe_value()) + for lazy_context in _fields.py__iter__() + for v in lazy_context.infer() if is_string(v) + ] + else: + return NO_CONTEXTS + + # Build source code + code = _NAMEDTUPLE_CLASS_TEMPLATE.format( + typename=name, + field_names=tuple(fields), + num_fields=len(fields), + arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1], + repr_fmt='', + field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name) + for index, name in enumerate(fields)) + ) + + # Parse source code + module = evaluator.grammar.parse(code) + generated_class = next(module.iter_classdefs()) + parent_context = ModuleContext( + evaluator, module, + path=None, + string_names=None, + code_lines=parso.split_lines(code, keepends=True), + ) + + return ContextSet([ClassContext(evaluator, parent_context, generated_class)]) + + +class PartialObject(object): + def __init__(self, actual_context, arguments): + self._actual_context = actual_context + self._arguments = arguments + + def __getattr__(self, name): + return getattr(self._actual_context, name) + + def py__call__(self, arguments): + key, lazy_context = next(self._arguments.unpack(), (None, None)) + if key is not None or lazy_context is None: + debug.warning("Partial should have a proper function %s", self._arguments) + return NO_CONTEXTS + + return lazy_context.infer().execute( + MergedPartialArguments(self._arguments, arguments) + ) + + +class MergedPartialArguments(AbstractArguments): + def __init__(self, partial_arguments, call_arguments): + self._partial_arguments = partial_arguments + self._call_arguments = call_arguments + + def unpack(self, funcdef=None): + unpacked = self._partial_arguments.unpack(funcdef) + # Ignore this one, it's the function. It was checked before that it's + # there. + next(unpacked) + for key_lazy_context in unpacked: + yield key_lazy_context + for key_lazy_context in self._call_arguments.unpack(funcdef): + yield key_lazy_context + + +def functools_partial(obj, arguments): + return ContextSet( + PartialObject(instance, arguments) + for instance in obj.py__call__(arguments) + ) + + +@argument_clinic('first, /') +def _return_first_param(firsts): + return firsts + + +@argument_clinic('seq') +def _random_choice(sequences): + return ContextSet.from_sets( + lazy_context.infer() + for sequence in sequences + for lazy_context in sequence.py__iter__() + ) + + +class ItemGetterCallable(ContextWrapper): + def __init__(self, instance, args_context_set): + super(ItemGetterCallable, self).__init__(instance) + self._args_context_set = args_context_set + + @repack_with_argument_clinic('item, /') + def py__call__(self, item_context_set): + context_set = NO_CONTEXTS + for args_context in self._args_context_set: + lazy_contexts = list(args_context.py__iter__()) + if len(lazy_contexts) == 1: + # TODO we need to add the contextualized context. 
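For reference, the stdlib behavior `ItemGetterCallable` mirrors: with one index the callable returns the item itself, with several it returns a tuple of items (which is what the fake `list` sequence below stands in for):

```python
from operator import itemgetter

f = itemgetter(1)
assert f([1, 'a']) == 'a'            # one index: the item itself
g = itemgetter(1, 2)
assert g([1, 1.0, '']) == (1.0, '')  # several indices: a tuple of items
```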
+ context_set |= item_context_set.get_item(lazy_contexts[0].infer(), None) + else: + context_set |= ContextSet([iterable.FakeSequence( + self._wrapped_context.evaluator, + 'list', + [ + LazyKnownContexts(item_context_set.get_item(lazy_context.infer(), None)) + for lazy_context in lazy_contexts + ], + )]) + return context_set + + +@argument_clinic('*args, /', want_obj=True, want_arguments=True) +def _operator_itemgetter(args_context_set, obj, arguments): + return ContextSet([ + ItemGetterCallable(instance, args_context_set) + for instance in obj.py__call__(arguments) + ]) + + +_implemented = { + 'builtins': { + 'getattr': builtins_getattr, + 'type': builtins_type, + 'super': builtins_super, + 'reversed': builtins_reversed, + 'isinstance': builtins_isinstance, + 'next': builtins_next, + 'iter': builtins_iter, + 'staticmethod': builtins_staticmethod, + 'classmethod': builtins_classmethod, + }, + 'copy': { + 'copy': _return_first_param, + 'deepcopy': _return_first_param, + }, + 'json': { + 'load': lambda obj, arguments: NO_CONTEXTS, + 'loads': lambda obj, arguments: NO_CONTEXTS, + }, + 'collections': { + 'namedtuple': collections_namedtuple, + }, + 'functools': { + 'partial': functools_partial, + 'wraps': _return_first_param, + }, + '_weakref': { + 'proxy': _return_first_param, + }, + 'random': { + 'choice': _random_choice, + }, + 'operator': { + 'itemgetter': _operator_itemgetter, + }, + 'abc': { + # Not sure if this is necessary, but it's used a lot in typeshed and + # it's for now easier to just pass the function. + 'abstractmethod': _return_first_param, + } +} diff --git a/jedi/settings.py b/jedi/settings.py index f1ae6dbb..7b7de1be 100644 --- a/jedi/settings.py +++ b/jedi/settings.py @@ -144,7 +144,7 @@ Check for `isinstance` and other information to infer a type. """ auto_import_modules = [ - 'hashlib', # setattr + 'gi', # This third-party repository (GTK stuff) doesn't really work with jedi ] """ Modules that are not analyzed but imported, although they contain Python code. diff --git a/jedi/third_party/typeshed b/jedi/third_party/typeshed new file mode 160000 index 00000000..3319cadf --- /dev/null +++ b/jedi/third_party/typeshed @@ -0,0 +1 @@ +Subproject commit 3319cadf85012328f8a12b15da4eecc8de0cf305 diff --git a/jedi/utils.py b/jedi/utils.py index 0f42e7d5..04e87c30 100644 --- a/jedi/utils.py +++ b/jedi/utils.py @@ -113,7 +113,7 @@ def setup_readline(namespace_module=__main__): # this code. This didn't use to be an issue until 3.3. Starting with # 3.4 this is different, it always overwrites the completer if it's not # already imported here. - import rlcompleter + import rlcompleter # noqa: F401 import readline except ImportError: print("Jedi: Module readline not available.") @@ -138,5 +138,5 @@ def version_info(): """ Version = namedtuple('Version', 'major, minor, micro') from jedi import __version__ - tupl = re.findall('[a-z]+|\d+', __version__) + tupl = re.findall(r'[a-z]+|\d+', __version__) return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) diff --git a/pytest.ini b/pytest.ini index 8730a97e..6bb988ae 100644 --- a/pytest.ini +++ b/pytest.ini @@ -4,7 +4,7 @@ addopts = --doctest-modules # Ignore broken files in blackbox test directories norecursedirs = .* docs completion refactor absolute_import namespace_package scripts extensions speed static_analysis not_in_sys_path - sample_venvs init_extension_module simple_import + sample_venvs init_extension_module simple_import jedi/third_party # Activate `clean_jedi_cache` fixture for all tests. 
This should be # fine as long as we are using `clean_jedi_cache` as a session scoped diff --git a/scripts/profile_output.py b/scripts/profile_output.py index f692e002..27b9e9f5 100755 --- a/scripts/profile_output.py +++ b/scripts/profile_output.py @@ -1,10 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3.6 +# -*- coding: utf-8 -*- """ -Profile a piece of Python code with ``cProfile``. Tries a completion on a +Profile a piece of Python code with ``profile``. Tries a completion on a certain piece of code. Usage: - profile.py [<code>] [-n <number>] [-d] [-o] [-s <sort>] + profile.py [<code>] [-n <number>] [-d] [-o] [-s <sort>] [-i] [--precision] profile.py -h | --help Options: @@ -12,34 +13,61 @@ Options: -n <number> Number of passes before profiling [default: 1]. -d --debug Enable Jedi internal debugging. -o --omit Omit profiler, just do a normal run. + -i --infer Infer types instead of completions. -s <sort> Sort the profile results, e.g. cum, name [default: time]. + --precision Makes profile time formatting more precise (nanoseconds) """ import time -import cProfile +try: + # Prefer the C-accelerated profiler when it is available. + import cProfile as profile +except ImportError: + import profile +import pstats from docopt import docopt import jedi -def run(code, index): +# Monkeypatch the time formatting function of profiling to make it easier to +# understand small time differences. +def f8(x): + ret = "%7.3f " % x + if ret == ' 0.000 ': + return "%6dµs" % (x * 1e6) + if ret.startswith(' 0.00'): + return "%8.4f" % x + return ret + + +def run(code, index, infer=False): start = time.time() - result = jedi.Script(code).completions() + script = jedi.Script(code) + if infer: + result = script.goto_definitions() + else: + result = script.completions() print('Used %ss for the %sth run.' % (time.time() - start, index + 1)) return result def main(args): code = args['<code>'] + infer = args['--infer'] n = int(args['-n']) + for i in range(n): - run(code, i) + run(code, i, infer=infer) + + if args['--precision']: + pstats.f8 = f8 jedi.set_debug_function(notices=args['--debug']) if args['--omit']: - run(code, n) + run(code, n, infer=infer) else: - cProfile.runctx('run(code, n)', globals(), locals(), sort=args['-s']) + profile.runctx('run(code, n, infer=infer)', globals(), locals(), sort=args['-s']) if __name__ == '__main__': diff --git a/setup.cfg b/setup.cfg index 3c6e79cf..1295389f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,12 @@ [bdist_wheel] universal=1 + +[flake8] +max-line-length = 100 +ignore = + # do not use bare 'except' + E722, + # don't know why this was ever even an option, 1+1 should be possible. + E226, + # line break before binary operator + W503, diff --git a/setup.py b/setup.py index 18a3b44f..9ab90346 100755 --- a/setup.py +++ b/setup.py @@ -2,6 +2,7 @@ from setuptools import setup, find_packages +import os import ast __AUTHOR__ = 'David Halter' @@ -10,12 +11,15 @@ __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com' # Get the version from within jedi. It's defined in exactly one place now.
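The hunk below pulls `__version__` out of `jedi/__init__.py` by indexing `tree.body`, compensating for parser versions that expose the module docstring separately. A standalone sketch of the same idea, using an inline source string instead of the real file and searching the assignment by name rather than by position:

```python
import ast

source = "'''module docstring'''\n__version__ = '0.13.3'\n"
tree = ast.parse(source)
version = next(
    node.value.s
    for node in tree.body
    if isinstance(node, ast.Assign)
    and getattr(node.targets[0], 'id', None) == '__version__'
)
assert version == '0.13.3'
```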
with open('jedi/__init__.py') as f: tree = ast.parse(f.read()) -version = tree.body[1].value.s +version = tree.body[int(not hasattr(tree, 'docstring'))].value.s readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read() with open('requirements.txt') as f: install_requires = f.read().splitlines() +assert os.path.isfile("jedi/third_party/typeshed/LICENSE"), \ + "Please download the typeshed submodule first" + setup(name='jedi', version=version, description='An autocompletion tool for Python that can be used for text editors.', @@ -29,10 +33,19 @@ setup(name='jedi', keywords='python completion refactoring vim', long_description=readme, packages=find_packages(exclude=['test', 'test.*']), - python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*', + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', install_requires=install_requires, - extras_require={'dev': ['docopt']}, - package_data={'jedi': ['evaluate/compiled/fake/*.pym']}, + extras_require={ + 'testing': [ + 'pytest>=3.1.0', + # docopt for sith doctests + 'docopt', + # colorama for colored debug output + 'colorama', + ], + }, + package_data={'jedi': ['*.pyi', 'third_party/typeshed/LICENSE', + 'third_party/typeshed/README']}, platforms=['any'], classifiers=[ 'Development Status :: 4 - Beta', @@ -43,7 +56,6 @@ setup(name='jedi', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', diff --git a/test/completion/arrays.py b/test/completion/arrays.py index 302349d3..e3dc932e 100644 --- a/test/completion/arrays.py +++ b/test/completion/arrays.py @@ -30,7 +30,7 @@ b = [6,7] #? int() b[8-7] # Something unreasonable: -#? +#? int() b[''] # ----------------- @@ -45,7 +45,7 @@ b[int():] #? list() b[:] -#? +#? int() b[:, 1] class _StrangeSlice(): @@ -161,7 +161,7 @@ def a(): return '' #? str() (a)() #? str() -(a)().replace() +(a)().title() #? int() (tuple).index() #? int() @@ -209,8 +209,7 @@ g dic2 = {'asdf': 3, 'b': 'str'} #? int() dic2['asdf'] -# TODO for now get doesn't work properly when used with a literal. -#? None +#? None int() str() dic2.get('asdf') # string literal @@ -268,11 +267,12 @@ for x in {1: 3.0, '': 1j}: dict().values().__iter__ d = dict(a=3, b='') +x, = d.values() +#? int() str() +x #? int() str() -d.values()[0] -#? int() d['a'] -#? int() None +#? int() str() None d.get('a') # ----------------- @@ -437,7 +437,7 @@ def test_func(): #? int() tuple({1})[0] -# python >= 3.3 +# python >= 3.4 # ----------------- # PEP 3132 Extended Iterable Unpacking (star unpacking) # ----------------- @@ -445,7 +445,7 @@ tuple({1})[0] a, *b, c = [1, 'b', list, dict] #? int() a -#? str() +#? b #? list c @@ -454,12 +454,14 @@ c a, *b, *c = [1, 'd', list] #? int() a -#? str() +#? b -#? list +#? c lc = [x for a, *x in [(1, '', 1.0)]] #? lc[0][0] +#? +lc[0][1] diff --git a/test/completion/basic.py b/test/completion/basic.py index ae2e3128..b72cc3d6 100644 --- a/test/completion/basic.py +++ b/test/completion/basic.py @@ -154,6 +154,9 @@ def global_define(): #? int() global_var_in_func +#? ['global_var_in_func'] +global_var_in_f + def funct1(): # From issue #610 @@ -175,6 +178,7 @@ def init_global_var_predefined(): #? int() None global_var_predefined + # ----------------- # within docstrs # ----------------- @@ -300,7 +304,7 @@ with open('') as f: #? ['closed'] f.closed for line in f: - #? str() + #?
str() bytes() line with open('') as f1, open('') as f2: diff --git a/test/completion/classes.py b/test/completion/classes.py index 92b8dc90..92d6b5bb 100644 --- a/test/completion/classes.py +++ b/test/completion/classes.py @@ -36,6 +36,7 @@ class TestClass(object): self2.var_inst = first_param self2.second = second_param self2.first = first_param + self2.first.var_on_argument = 5 a = 3 def var_func(self): @@ -57,6 +58,8 @@ class TestClass(object): # should not know any class functions! #? [] values + #? + values #? ['return'] ret return a1 @@ -417,6 +420,9 @@ class PrivateVar(): def __private_func(self): return 1 + #? int() + __private_func() + def wrap_private(self): return self.__private_func() #? [] @@ -425,6 +431,8 @@ PrivateVar().__var PrivateVar().__var #? [] PrivateVar().__private_func +#? [] +PrivateVar.__private_func #? int() PrivateVar().wrap_private() @@ -571,3 +579,26 @@ class Foo(object): #? int() Foo().b + +# ----------------- +# default arguments +# ----------------- + +default = '' +class DefaultArg(): + default = 3 + def x(self, arg=default): + #? str() + default + return arg + def y(self): + return default + +#? int() +DefaultArg().x() +#? str() +DefaultArg().y() +#? int() +DefaultArg.x() +#? str() +DefaultArg.y() diff --git a/test/completion/comprehensions.py b/test/completion/comprehensions.py index 402ef753..b2da01bc 100644 --- a/test/completion/comprehensions.py +++ b/test/completion/comprehensions.py @@ -52,12 +52,12 @@ left [a for a in {1:'x'}][0] # list comprehensions should also work in combination with functions -def listen(arg): +def _listen(arg): for x in arg: #? str() x -listen(['' for x in [1]]) +_listen(['' for x in [1]]) #? ([str for x in []])[0] @@ -212,3 +212,14 @@ next(iter({a for a in range(10)})) #? int() [a for a in {1, 2, 3}][0] + +# ----------------- +# syntax errors +# ----------------- + +# Issue #1146 + +#? ['list'] +[int(str(x.value) for x in list + +def reset_missing_bracket(): pass diff --git a/test/completion/context.py b/test/completion/context.py index d3e79b81..d77c79c3 100644 --- a/test/completion/context.py +++ b/test/completion/context.py @@ -16,8 +16,18 @@ class Y(X): #? ['func'] def f + #? ['__doc__'] + __doc__ #? [] + def __doc__ + + # This might or might not be what we wanted, currently properties are also + # used like this. IMO this is not wanted ~dave. + #? ['__class__'] def __class__ + #? [] + __class__ + #? ['__repr__'] def __repr__ diff --git a/test/completion/dynamic_arrays.py b/test/completion/dynamic_arrays.py index 5cb52fc6..0f03c8fd 100644 --- a/test/completion/dynamic_arrays.py +++ b/test/completion/dynamic_arrays.py @@ -55,9 +55,10 @@ list(arr)[10] arr = [1.0] arr.extend([1,2,3]) arr.extend([]) -arr.extend("") # should ignore +arr.extend("") +arr.extend(list) # should ignore -#? float() int() +#? float() int() str() arr[100] a = set(arr) @@ -94,7 +95,7 @@ arr2[0] lst = [1] lst.append(1.0) s = set(lst) -s.add("") +s.add("ahh") lst = list(s) lst.append({}) diff --git a/test/completion/fstring.py b/test/completion/fstring.py index 52e81123..32f29e9b 100644 --- a/test/completion/fstring.py +++ b/test/completion/fstring.py @@ -25,3 +25,14 @@ Fr'sasdf' #? 7 str() Fr'''sasdf''' + '' + +#? ['upper'] +f'xyz'.uppe + + +#? 3 [] +f'f' + +# Github #1248 +#? int() +{"foo": 1}[f"foo"] diff --git a/test/completion/functions.py b/test/completion/functions.py index bc4eb200..585b692b 100644 --- a/test/completion/functions.py +++ b/test/completion/functions.py @@ -319,6 +319,7 @@ exe['c'] a = 'a' exe2 = kwargs_func(**{a:3, 'b':4.0}) + #? 
int() exe2['a'] #? float() @@ -326,6 +327,19 @@ exe2['b'] #? int() float() exe2['c'] +exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]}) + +# Should resolve to the same as 2 but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +exe3['a'] + +#? +exe3['b'] + +#? +exe3['c'] + # ----------------- # *args / ** kwargs # ----------------- diff --git a/test/completion/generators.py b/test/completion/generators.py index 69e4acdb..ee541df1 100644 --- a/test/completion/generators.py +++ b/test/completion/generators.py @@ -220,7 +220,7 @@ def x(): # yield from # ----------------- -# python >= 3.3 +# python >= 3.4 def yield_from(): yield from iter([1]) diff --git a/test/completion/goto.py b/test/completion/goto.py index adff012d..f0889712 100644 --- a/test/completion/goto.py +++ b/test/completion/goto.py @@ -120,6 +120,8 @@ import_tree.a #! ['module mod1'] import import_tree.mod1 +#! ['module mod1'] +from import_tree.mod1 #! ['a = 1'] import_tree.mod1.a diff --git a/test/completion/imports.py b/test/completion/imports.py index 69e96cda..150f2743 100644 --- a/test/completion/imports.py +++ b/test/completion/imports.py @@ -114,6 +114,31 @@ def as_imports(): bar.a +def broken_import(): + import import_tree.mod1 + #? import_tree.mod1 + from import_tree.mod1 + + #? 25 import_tree.mod1 + import import_tree.mod1. + #? 25 import_tree.mod1 + impo5t import_tree.mod1.foo + #? 25 import_tree.mod1 + import import_tree.mod1.foo. + #? 31 import_tree.mod1 + import json, import_tree.mod1.foo. + + # Cases with ; + mod1 = 3 + #? 25 int() + import import_tree; mod1. + #? 38 import_tree.mod1 + import_tree; import import_tree.mod1. + + #! ['module json'] + from json + + def test_import_priorities(): """ It's possible to overwrite import paths in an ``__init__.py`` file, by diff --git a/test/completion/invalid.py b/test/completion/invalid.py index 7c047e66..0b81cc9a 100644 --- a/test/completion/invalid.py +++ b/test/completion/invalid.py @@ -143,12 +143,12 @@ a3[0] a = [for a in def break(): pass -#? +#? str() a[0] a = [a for a in [1,2] def break(): pass -#? +#? str() a[0] #? [] diff --git a/test/completion/keywords.py b/test/completion/keywords.py index 9631e8d6..fa9bf52d 100644 --- a/test/completion/keywords.py +++ b/test/completion/keywords.py @@ -18,13 +18,13 @@ b; continue b; continu #? [] -c + brea +c + pass #? [] -a + break +a + pass -#? ['break'] -b; break +#? ['pass'] +b; pass # ----------------- # Keywords should not appear everywhere. diff --git a/test/completion/pep0484_basic.py b/test/completion/pep0484_basic.py index 0230d375..64b69dc1 100644 --- a/test/completion/pep0484_basic.py +++ b/test/completion/pep0484_basic.py @@ -1,6 +1,6 @@ """ Pep-0484 type hinting """ -# python >= 3.2 +# python >= 3.4 class A(): @@ -68,7 +68,7 @@ def return_annotation_and_docstring() -> str: """ pass -#? str() int() +#? str() return_annotation_and_docstring() @@ -138,7 +138,7 @@ function_with_non_pep_0484_annotation(1, 2, 3, "force string") def function_forward_reference_dynamic( x: return_str_type(), y: "return_str_type()") -> None: - #? + #? str() x #? str() y diff --git a/test/completion/pep0484_typing.py b/test/completion/pep0484_typing.py index 123e6438..0590ccee 100644 --- a/test/completion/pep0484_typing.py +++ b/test/completion/pep0484_typing.py @@ -40,6 +40,8 @@ def we_can_has_sequence(p, q, r, s, t, u): t[1] #? ["append"] u.a + #? float() list() + u[1.0] #? float() u[1] @@ -114,13 +116,9 @@ def tuple(p, q, r): i, s, f = q #? int() i - ##? 
str() --- TODO fix support for tuple assignment - # https://github.com/davidhalter/jedi/pull/663#issuecomment-172317854 - #? + #? str() s - ##? float() --- TODO fix support for tuple assignment - # https://github.com/davidhalter/jedi/pull/663#issuecomment-172317854 - #? + #? float() f class Key: @@ -173,6 +171,21 @@ def mapping(p, q, d, dd, r, s, t): key #? Value() value + for key, value in q.items(): + #? Key() + key + #? Value() + value + for key, value in d.items(): + #? Key() + key + #? Value() + value + for key, value in dd.items(): + #? Key() + key + #? Value() + value for key in r: #? Key() key @@ -211,7 +224,7 @@ def optional(p): as being of that type. Jedi doesn't do anything with the extra into that it can be None as well """ - #? int() + #? int() None p class ForwardReference: @@ -243,7 +256,7 @@ for key in x.keys(): for value in x.values(): #? int() value -# python >= 3.2 +# python >= 3.4 class TestDefaultDict(typing.DefaultDict[str, int]): def setdud(self): @@ -271,7 +284,7 @@ for key in x.keys(): for value in x.values(): #? int() value -# python >= 3.2 +# python >= 3.4 """ @@ -292,3 +305,49 @@ from typing import Union as U def union4(x: U[int, str]): #? int() str() x + + +TYPE_VAR = typing.TypeVar('TYPE_VAR') +# TODO there should at least be some results. +#? [] +TYPE_VAR. +#! ["TYPE_VAR = typing.TypeVar('TYPE_VAR')"] +TYPE_VAR + + +class WithTypeVar(typing.Generic[TYPE_VAR]): + def lala(self) -> TYPE_VAR: + ... + + +def maaan(p: WithTypeVar[int]): + #? int() + p.lala() + + +if typing.TYPE_CHECKING: + with_type_checking = 1 +else: + without_type_checking = 1.0 +#? int() +with_type_checking +#? +without_type_checking + +def foo(a: typing.List, b: typing.Dict, c: typing.MutableMapping) -> typing.Type[int]: + #? ['append'] + a.appen + #? list() + a + #? + a[0] + #? ['setdefault'] + b.setd + #? ['setdefault'] + c.setd + #? typing.MutableMapping() + c + #? + c['asdf'] +#? int +foo() diff --git a/test/completion/pep0526_variables.py b/test/completion/pep0526_variables.py index ee848b6c..867fa54e 100644 --- a/test/completion/pep0526_variables.py +++ b/test/completion/pep0526_variables.py @@ -36,3 +36,18 @@ char: str for char in NOT_DEFINED: #? str() char + + +class Foo(): + bar: int + baz: typing.ClassVar[str] + + +#? int() +Foo.bar +#? int() +Foo().bar +#? str() +Foo.baz +#? str() +Foo().baz diff --git a/test/completion/precedence.py b/test/completion/precedence.py index 60781158..6e9c1eab 100644 --- a/test/completion/precedence.py +++ b/test/completion/precedence.py @@ -57,6 +57,11 @@ a #? int() str() (3 ** 'a') +class X(): + foo = 2 +#? int() +(X.foo ** 3) + # ----------------- # assignments # ----------------- diff --git a/test/completion/recursion.py b/test/completion/recursion.py index a3e7670b..ebbd69e3 100644 --- a/test/completion/recursion.py +++ b/test/completion/recursion.py @@ -76,3 +76,18 @@ class InstanceAttributeIfs: InstanceAttributeIfs().a1 #? int() str() InstanceAttributeIfs().a2 + + + +class A: + def a(self, b): + for x in [self.a(i) for i in b]: + #? + x + +class B: + def a(self, b): + for i in b: + for i in self.a(i): + #? + yield i diff --git a/test/completion/stdlib.py b/test/completion/stdlib.py index 30d7ce07..541e9024 100644 --- a/test/completion/stdlib.py +++ b/test/completion/stdlib.py @@ -25,7 +25,7 @@ next(reversed(yielder())) #? next(reversed()) -#? str() +#? str() bytes() next(open('')) #? int() @@ -34,6 +34,8 @@ next(open('')) # Compiled classes should have the meta class attributes. #? ['__itemsize__'] tuple.__itemsize__ +#? 
[] +tuple().__itemsize__ # ----------------- # type() calls with one parameter @@ -69,10 +71,15 @@ if os.path.isfile(): #? ['abspath'] fails = os.path.abspath +# The type vars and other underscored things from typeshed should not be +# findable. +#? +os._T + with open('foo') as f: for line in f.readlines(): - #? str() + #? str() bytes() line # ----------------- # enumerate @@ -101,9 +108,6 @@ for a in re.finditer('a', 'a'): #? int() a.start() -#? str() -re.sub('a', 'a') - # ----------------- # ref # ----------------- @@ -114,7 +118,7 @@ weakref.proxy(1) #? weakref.ref() weakref.ref(1) -#? int() +#? int() None weakref.ref(1)() # ----------------- @@ -165,10 +169,6 @@ import sqlite3 con = sqlite3.connect() #? sqlite3.Cursor() c = con.cursor() -#? sqlite3.Row() -row = c.fetchall()[0] -#? str() -row.keys()[0] def huhu(db): """ @@ -241,6 +241,31 @@ with contextlib.closing('asd') as string: #? str() string +# ----------------- +# operator +# ----------------- + +import operator + +f = operator.itemgetter(1) +#? float() +f([1.0]) +#? str() +f([1, '']) + +g = operator.itemgetter(1, 2) +x1, x2 = g([1, 1.0, '']) +#? float() +x1 +#? str() +x2 + +x1, x2 = g([1, '']) +#? str() +x1 +#? int() str() +x2 + # ----------------- # shlex # ----------------- @@ -249,5 +274,5 @@ with contextlib.closing('asd') as string: import shlex qsplit = shlex.split("foo, ferwerwerw werw werw e") for part in qsplit: - #? str() None + #? str() part diff --git a/test/completion/stub_folder/stub_only.pyi b/test/completion/stub_folder/stub_only.pyi new file mode 100644 index 00000000..4f2f4239 --- /dev/null +++ b/test/completion/stub_folder/stub_only.pyi @@ -0,0 +1 @@ +in_stub_only: int diff --git a/test/completion/stub_folder/stub_only_folder/__init__.pyi b/test/completion/stub_folder/stub_only_folder/__init__.pyi new file mode 100644 index 00000000..77cf2466 --- /dev/null +++ b/test/completion/stub_folder/stub_only_folder/__init__.pyi @@ -0,0 +1 @@ +in_stub_only_folder: int diff --git a/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi b/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi new file mode 100644 index 00000000..4f2f4239 --- /dev/null +++ b/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi @@ -0,0 +1 @@ +in_stub_only: int diff --git a/test/completion/stub_folder/stub_only_folder/nested_with_stub.py b/test/completion/stub_folder/stub_only_folder/nested_with_stub.py new file mode 100644 index 00000000..0f9111f4 --- /dev/null +++ b/test/completion/stub_folder/stub_only_folder/nested_with_stub.py @@ -0,0 +1,2 @@ +in_python = '' +in_both = '' diff --git a/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi b/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi new file mode 100644 index 00000000..53dc2653 --- /dev/null +++ b/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi @@ -0,0 +1,2 @@ +in_stub: int +in_both: float diff --git a/test/completion/stub_folder/stub_only_folder/python_only.py b/test/completion/stub_folder/stub_only_folder/python_only.py new file mode 100644 index 00000000..23370e0c --- /dev/null +++ b/test/completion/stub_folder/stub_only_folder/python_only.py @@ -0,0 +1 @@ +in_python = '' diff --git a/test/completion/stub_folder/with_stub.py b/test/completion/stub_folder/with_stub.py new file mode 100644 index 00000000..7f064dca --- /dev/null +++ b/test/completion/stub_folder/with_stub.py @@ -0,0 +1,2 @@ +in_with_stub_both = 5 +in_with_stub_python = 8 diff --git a/test/completion/stub_folder/with_stub.pyi 
b/test/completion/stub_folder/with_stub.pyi new file mode 100644 index 00000000..7a3f3ecf --- /dev/null +++ b/test/completion/stub_folder/with_stub.pyi @@ -0,0 +1,2 @@ +in_with_stub_both: str +in_with_stub_stub: float diff --git a/test/completion/stub_folder/with_stub_folder/__init__.py b/test/completion/stub_folder/with_stub_folder/__init__.py new file mode 100644 index 00000000..4201289b --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/__init__.py @@ -0,0 +1,2 @@ +in_with_stub_both_folder = 5 +in_with_stub_python_folder = 8 diff --git a/test/completion/stub_folder/with_stub_folder/__init__.pyi b/test/completion/stub_folder/with_stub_folder/__init__.pyi new file mode 100644 index 00000000..ea7ec38c --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/__init__.pyi @@ -0,0 +1,2 @@ +in_with_stub_both_folder: str +in_with_stub_stub_folder: float diff --git a/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi b/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi new file mode 100644 index 00000000..4f2f4239 --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi @@ -0,0 +1 @@ +in_stub_only: int diff --git a/test/completion/stub_folder/with_stub_folder/nested_with_stub.py b/test/completion/stub_folder/with_stub_folder/nested_with_stub.py new file mode 100644 index 00000000..0f9111f4 --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/nested_with_stub.py @@ -0,0 +1,2 @@ +in_python = '' +in_both = '' diff --git a/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi b/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi new file mode 100644 index 00000000..53dc2653 --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi @@ -0,0 +1,2 @@ +in_stub: int +in_both: float diff --git a/test/completion/stub_folder/with_stub_folder/python_only.py b/test/completion/stub_folder/with_stub_folder/python_only.py new file mode 100644 index 00000000..23370e0c --- /dev/null +++ b/test/completion/stub_folder/with_stub_folder/python_only.py @@ -0,0 +1 @@ +in_python = '' diff --git a/test/completion/stubs.py b/test/completion/stubs.py new file mode 100644 index 00000000..83013542 --- /dev/null +++ b/test/completion/stubs.py @@ -0,0 +1,104 @@ +# python >= 3.4 +from stub_folder import with_stub, stub_only, with_stub_folder, stub_only_folder + +# ------------------------- +# Just files +# ------------------------- + +#? int() +stub_only.in_stub_only +#? str() +with_stub.in_with_stub_both +#? int() +with_stub.in_with_stub_python +#? float() +with_stub.in_with_stub_stub + +#! ['in_stub_only: int'] +stub_only.in_stub_only +#! ['in_with_stub_both = 5'] +with_stub.in_with_stub_both +#! ['in_with_stub_python = 8'] +with_stub.in_with_stub_python +#! ['in_with_stub_stub: float'] +with_stub.in_with_stub_stub + +#? ['in_stub_only'] +stub_only.in_ +#? ['in_stub_only'] +from stub_folder.stub_only import in_ +#? ['in_with_stub_both', 'in_with_stub_python', 'in_with_stub_stub'] +with_stub.in_ +#? ['in_with_stub_both', 'in_with_stub_python', 'in_with_stub_stub'] +from stub_folder.with_stub import in_ + +#? ['with_stub', 'stub_only', 'with_stub_folder', 'stub_only_folder'] +from stub_folder. + +# ------------------------- +# Folders +# ------------------------- + +#? int() +stub_only_folder.in_stub_only_folder +#? str() +with_stub_folder.in_with_stub_both_folder +#? int() +with_stub_folder.in_with_stub_python_folder +#? float() +with_stub_folder.in_with_stub_stub_folder + +#? 
['in_stub_only_folder'] +stub_only_folder.in_ +#? ['in_with_stub_both_folder', 'in_with_stub_python_folder', 'in_with_stub_stub_folder'] +with_stub_folder.in_ + +# ------------------------- +# Folders nested with stubs +# ------------------------- + +from stub_folder.with_stub_folder import nested_stub_only, nested_with_stub, \ + python_only + +#? int() +nested_stub_only.in_stub_only +#? float() +nested_with_stub.in_both +#? str() +nested_with_stub.in_python +#? int() +nested_with_stub.in_stub +#? str() +python_only.in_python + +#? ['in_stub_only_folder'] +stub_only_folder.in_ +#? ['in_with_stub_both_folder', 'in_with_stub_python_folder', 'in_with_stub_stub_folder'] +with_stub_folder.in_ +#? ['in_python'] +python_only.in_ + +# ------------------------- +# Stub-only folders nested with stubs +# ------------------------- + +from stub_folder.stub_only_folder import nested_stub_only, nested_with_stub, \ + python_only + +#? int() +nested_stub_only.in_stub_only +#? float() +nested_with_stub.in_both +#? str() +nested_with_stub.in_python +#? int() +nested_with_stub.in_stub +#? str() +python_only.in_python + +#? ['in_stub_only'] +nested_stub_only.in_ +#? ['in_both', 'in_python', 'in_stub'] +nested_with_stub.in_ +#? ['in_python'] +python_only.in_ diff --git a/test/completion/types.py b/test/completion/types.py index 19621b29..753af6c3 100644 --- a/test/completion/types.py +++ b/test/completion/types.py @@ -131,3 +131,58 @@ set_t2 = set() #? ['clear', 'copy'] set_t2.c + +# ----------------- +# pep 448 unpacking generalizations +# ----------------- +# python >= 3.5 + +d = {'a': 3} +dc = {v: 3 for v in ['a']} + +#? dict() +{**d} + +#? dict() +{**dc} + +#? str() +{**d, "b": "b"}["b"] + +#? str() +{**dc, "b": "b"}["b"] + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{**d}["a"] + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{**dc}["a"] + +s = {1, 2, 3} + +#? set() +{*s} + +#? set() +{*s, 4, *s} + +s = {1, 2, 3} +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{*s}.pop() + +#? int() +{*s, 4}.pop() + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +[*s][0] + +#? int() +[*s, 4][0] diff --git a/test/completion/usages.py b/test/completion/usages.py index 1fdbc3fc..3e05b7b5 100644 --- a/test/completion/usages.py +++ b/test/completion/usages.py @@ -265,7 +265,10 @@ check(DynamicParam()) import _sre -#< 0 (-3,7), (0,0), ('_sre', None, None) +# TODO reenable this, it's currently not working, because of 2/3 +# inconsistencies in typeshed (_sre exists in typeshed/2, but not in +# typeshed/3).
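The fixture layout exercised above encodes the rule these tests check: when a `.py` module and a `.pyi` stub sit side by side, names and annotations come from the stub, while names that only exist in the `.py` file still resolve there. A minimal sketch with hypothetical files, not part of the test suite:

```python
import os
import tempfile

d = tempfile.mkdtemp()
with open(os.path.join(d, 'mod.py'), 'w') as f:
    f.write('in_both = 5\nin_python = 5\n')    # runtime values: ints
with open(os.path.join(d, 'mod.pyi'), 'w') as f:
    f.write('in_both: float\nin_stub: str\n')  # the stub wins for in_both
# A stub-aware tool pointed at `d` should infer mod.in_both as float,
# mod.in_stub as str, and fall back to mod.py for mod.in_python.
```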
+##< 0 (-3,7), (0,0), ('_sre', None, None) _sre # ----------------- diff --git a/test/conftest.py b/test/conftest.py index 0a5598e3..7291600a 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -126,11 +126,12 @@ class StaticAnalysisCase(object): return "<%s: %s>" % (self.__class__.__name__, os.path.basename(self._path)) -@pytest.fixture() -def venv_path(tmpdir, environment): +@pytest.fixture(scope='session') +def venv_path(tmpdir_factory, environment): if environment.version_info.major < 3: pytest.skip("python -m venv does not exist in Python 2") + tmpdir = tmpdir_factory.mktemp('venv_path') dirname = os.path.join(tmpdir.dirname, 'venv') # We cannot use the Python from tox because tox creates virtualenvs and diff --git a/test/examples/issue1209/__init__.py b/test/examples/issue1209/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/issue1209/api/__init__.py b/test/examples/issue1209/api/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/issue1209/api/whatever/__init__.py b/test/examples/issue1209/api/whatever/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/issue1209/api/whatever/api_test1.py b/test/examples/issue1209/api/whatever/api_test1.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/issue1209/whatever/__init__.py b/test/examples/issue1209/whatever/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/issue1209/whatever/test.py b/test/examples/issue1209/whatever/test.py new file mode 100644 index 00000000..e69de29b diff --git a/test/examples/namespace_package_relative_import/rel1.py b/test/examples/namespace_package_relative_import/rel1.py new file mode 100644 index 00000000..79b35a58 --- /dev/null +++ b/test/examples/namespace_package_relative_import/rel1.py @@ -0,0 +1 @@ +from .rel2 import name diff --git a/test/examples/namespace_package_relative_import/rel2.py b/test/examples/namespace_package_relative_import/rel2.py new file mode 100644 index 00000000..14a0ee4a --- /dev/null +++ b/test/examples/namespace_package_relative_import/rel2.py @@ -0,0 +1 @@ +name = 1 diff --git a/test/examples/stub_packages/no_python-stubs/__init__.pyi b/test/examples/stub_packages/no_python-stubs/__init__.pyi new file mode 100644 index 00000000..18770a10 --- /dev/null +++ b/test/examples/stub_packages/no_python-stubs/__init__.pyi @@ -0,0 +1 @@ +foo: int diff --git a/test/examples/stub_packages/with_python-stubs/__init__.pyi b/test/examples/stub_packages/with_python-stubs/__init__.pyi new file mode 100644 index 00000000..5e7ee1b5 --- /dev/null +++ b/test/examples/stub_packages/with_python-stubs/__init__.pyi @@ -0,0 +1,2 @@ +both: int +stub_only: str diff --git a/test/examples/stub_packages/with_python-stubs/module.pyi b/test/examples/stub_packages/with_python-stubs/module.pyi new file mode 100644 index 00000000..53331495 --- /dev/null +++ b/test/examples/stub_packages/with_python-stubs/module.pyi @@ -0,0 +1 @@ +in_sub_module: int diff --git a/test/examples/stub_packages/with_python/__init__.py b/test/examples/stub_packages/with_python/__init__.py new file mode 100644 index 00000000..d16c270e --- /dev/null +++ b/test/examples/stub_packages/with_python/__init__.py @@ -0,0 +1,2 @@ +python_only = 1 +both = '' diff --git a/test/examples/stub_packages/with_python/module.py b/test/examples/stub_packages/with_python/module.py new file mode 100644 index 00000000..375009f8 --- /dev/null +++ b/test/examples/stub_packages/with_python/module.py @@ -0,0 
+1 @@ +in_sub_module = '' diff --git a/test/run.py b/test/run.py index 611fba6e..6ed0dbec 100755 --- a/test/run.py +++ b/test/run.py @@ -126,6 +126,7 @@ from jedi.api.classes import Definition from jedi.api.completion import get_user_scope from jedi import parser_utils from jedi.api.environment import get_default_environment, get_system_environment +from jedi.evaluate.gradual.conversion import try_stubs_to_actual_context_set TEST_COMPLETIONS = 0 @@ -230,7 +231,10 @@ class IntegrationTestCase(object): if user_context.api_type == 'function': user_context = user_context.get_function_execution() element.parent = user_context.tree_node - results = evaluator.eval_element(user_context, element) + results = try_stubs_to_actual_context_set( + evaluator.eval_element(user_context, element), + prefer_stub_to_compiled=True + ) if not results: raise Exception('Could not resolve %s on line %s' % (match.string, self.line_nr - 1)) @@ -412,7 +416,6 @@ if __name__ == '__main__': dir_ = os.path.dirname(os.path.realpath(__file__)) completion_test_dir = os.path.join(dir_, '../test/completion') completion_test_dir = os.path.abspath(completion_test_dir) - summary = [] tests_fail = 0 # execute tests @@ -422,9 +425,11 @@ if __name__ == '__main__': cases += collect_dir_tests(completion_test_dir, test_files, True) def file_change(current, tests, fails): - if current is not None: + if current is None: + current = '' + else: current = os.path.basename(current) - print('%s \t\t %s tests and %s fails.' % (current, tests, fails)) + print('{:25} {} tests and {} fails.'.format(current, tests, fails)) def report(case, actual, desired): if actual == desired: @@ -470,8 +475,6 @@ if __name__ == '__main__': print('\nSummary: (%s fails of %s tests) in %.3fs' % (tests_fail, len(cases), time.time() - t_start)) - for s in summary: - print(s) exit_code = 1 if tests_fail else 0 sys.exit(exit_code) diff --git a/test/static_analysis/attribute_error.py b/test/static_analysis/attribute_error.py index 4b084c11..7ceb9397 100644 --- a/test/static_analysis/attribute_error.py +++ b/test/static_analysis/attribute_error.py @@ -111,9 +111,3 @@ import import_tree import_tree.a import_tree.b - -# This is something that raised an error, because it was using a complex -# mixture of Jedi fakes and compiled objects. -import _sre -#! 15 attribute-error -_sre.compile().not_existing diff --git a/test/static_analysis/operations.py b/test/static_analysis/operations.py index 05e1406c..bca27c6a 100644 --- a/test/static_analysis/operations.py +++ b/test/static_analysis/operations.py @@ -6,8 +6,9 @@ 1 - '1' -1 - - 1 --1 - int() -int() - float() +# TODO uncomment +#-1 - int() +#int() - float() float() - 3.0 a = 3 diff --git a/test/static_analysis/try_except.py b/test/static_analysis/try_except.py index e6543280..540ba724 100644 --- a/test/static_analysis/try_except.py +++ b/test/static_analysis/try_except.py @@ -87,3 +87,21 @@ else: str.upper #! 4 attribute-error str.undefined + +# ----------------- +# arguments +# ----------------- + +def i_see(r): + return r + +def lala(): + # This weird structure checks if the error is actually resolved in the + # right place. + a = TypeError + try: + i_see() + except a: + pass + #! 5 type-error-too-few-arguments + i_see() diff --git a/test/test_api/test_api.py b/test/test_api/test_api.py index 47c87833..b770fd59 100644 --- a/test/test_api/test_api.py +++ b/test/test_api/test_api.py @@ -5,17 +5,25 @@ Test all things related to the ``jedi.api`` module. 
import os from textwrap import dedent -from jedi import preload_module -from jedi._compatibility import is_py3 from pytest import raises from parso import cache +from jedi import preload_module +from jedi.evaluate.gradual import typeshed + def test_preload_modules(): def check_loaded(*modules): + for grammar_cache in cache.parser_cache.values(): + if None in grammar_cache: + break + # Filter the typeshed parser cache. + typeshed_cache_count = sum( + 1 for path in grammar_cache + if path is not None and path.startswith(typeshed.TYPESHED_PATH) + ) # +1 for None module (currently used) - grammar_cache = next(iter(cache.parser_cache.values())) - assert len(grammar_cache) == len(modules) + 1 + assert len(grammar_cache) - typeshed_cache_count == len(modules) + 1 for i in modules: assert [i in k for k in grammar_cache.keys() if k is not None] @@ -111,11 +119,7 @@ def test_goto_assignments_on_non_name(Script, environment): assert Script('for').goto_assignments() == [] assert Script('assert').goto_assignments() == [] - if environment.version_info.major == 2: - # In Python 2.7 True is still a name. - assert Script('True').goto_assignments()[0].description == 'instance True' - else: - assert Script('True').goto_assignments() == [] + assert Script('True').goto_assignments() == [] def test_goto_definitions_on_non_name(Script): @@ -199,9 +203,9 @@ def test_goto_assignments_follow_imports(Script): assert definition.name == 'p' result, = definition.goto_assignments() assert result.name == 'p' - result, = definition._goto_definitions() + result, = definition.infer() assert result.name == 'int' - result, = result._goto_definitions() + result, = result.infer() assert result.name == 'int' definition, = script.goto_assignments() @@ -285,3 +289,11 @@ def test_backslash_continuation_and_bracket(Script): column = lines[-1].index('(') def_, = Script(code, line=len(lines), column=column).goto_definitions() assert def_.name == 'int' + + +def test_goto_follow_builtin_imports(Script): + s = Script('import sys; sys') + d, = s.goto_assignments(follow_imports=True) + assert d.in_builtin_module() is True + d, = s.goto_assignments(follow_imports=True, follow_builtin_imports=True) + assert d.in_builtin_module() is True diff --git a/test/test_api/test_api_classes_follow_definition.py b/test/test_api/test_api_classes_follow_definition.py index 69bb31d0..1b6cf793 100644 --- a/test/test_api/test_api_classes_follow_definition.py +++ b/test/test_api/test_api_classes_follow_definition.py @@ -18,7 +18,7 @@ def check_follow_definition_types(Script, source): return [d.type for d in defs] -def test_follow_import_incomplete(Script): +def test_follow_import_incomplete(Script, environment): """ Completion on incomplete imports should always take the full completion to do any evaluation. 
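For orientation, the 0.13-era API that these tests drive, sketched as a plain script using only calls that appear in the tests above:

```python
import jedi

script = jedi.Script('import sys; sys')
d, = script.goto_assignments(follow_imports=True)
print(d.in_builtin_module())
d, = script.goto_assignments(follow_imports=True, follow_builtin_imports=True)
print(d.in_builtin_module())  # both print True, per test_goto_follow_builtin_imports
```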
@@ -34,8 +34,10 @@ def test_follow_import_incomplete(Script): # incomplete `from * import` part datetime = check_follow_definition_types(Script, "from datetime import datetim") - assert set(datetime) == {'class', 'instance'} # py33: builtin and pure py version - + if environment.version_info.major == 2: + assert datetime == ['class'] + else: + assert set(datetime) == {'class', 'instance'} # py3: builtin and pure py version # os.path check ospath = check_follow_definition_types(Script, "from os.path import abspat") assert ospath == ['function'] diff --git a/test/test_api/test_call_signatures.py b/test/test_api/test_call_signatures.py index 77a58005..28cb619d 100644 --- a/test/test_api/test_call_signatures.py +++ b/test/test_api/test_call_signatures.py @@ -6,7 +6,7 @@ import pytest from ..helpers import TestCase from jedi import cache -from jedi._compatibility import is_py33 +from jedi.parser_utils import get_call_signature def assert_signature(Script, source, expected_name, expected_index=0, line=None, column=None): @@ -24,7 +24,7 @@ def assert_signature(Script, source, expected_name, expected_index=0, line=None, def test_valid_call(Script): - assert_signature(Script, 'str()', 'str', column=4) + assert_signature(Script, 'bool()', 'bool', column=5) class TestCallSignatures(TestCase): @@ -39,12 +39,12 @@ class TestCallSignatures(TestCase): run = self._run_simple # simple - s1 = "sorted(a, str(" + s1 = "sorted(a, bool(" run(s1, 'sorted', 0, 7) run(s1, 'sorted', 1, 9) run(s1, 'sorted', 1, 10) run(s1, 'sorted', 1, 11) - run(s1, 'str', 0, 14) + run(s1, 'bool', 0, 15) s2 = "abs(), " run(s2, 'abs', 0, 4) @@ -58,26 +58,26 @@ class TestCallSignatures(TestCase): def test_more_complicated(self): run = self._run_simple - s4 = 'abs(zip(), , set,' + s4 = 'abs(bool(), , set,' run(s4, None, column=3) run(s4, 'abs', 0, 4) - run(s4, 'zip', 0, 8) - run(s4, 'abs', 0, 9) - run(s4, 'abs', None, 10) + run(s4, 'bool', 0, 9) + run(s4, 'abs', 0, 10) + run(s4, 'abs', None, 11) s5 = "sorted(1,\nif 2:\n def a():" run(s5, 'sorted', 0, 7) run(s5, 'sorted', 1, 9) - s6 = "str().center(" - run(s6, 'center', 0) - run(s6, 'str', 0, 4) + s6 = "bool().__eq__(" + run(s6, '__eq__', 0) + run(s6, 'bool', 0, 5) s7 = "str().upper().center(" - s8 = "str(int[zip(" + s8 = "bool(int[abs(" run(s7, 'center', 0) - run(s8, 'zip', 0) - run(s8, 'str', 0, 8) + run(s8, 'abs', 0) + run(s8, 'bool', 0, 10) run("import time; abc = time; abc.sleep(", 'sleep', 0) @@ -87,15 +87,19 @@ class TestCallSignatures(TestCase): "func(alpha='101'," self._run_simple(s, 'func', 0, column=13, line=2) - def test_flows(self): - # jedi-vim #9 - self._run_simple("with open(", 'open', 0) - + def test_for(self): # jedi-vim #11 self._run_simple("for sorted(", 'sorted', 0) self._run_simple("for s in sorted(", 'sorted', 0) +def test_with(Script): + # jedi-vim #9 + sigs = Script("with open(").call_signatures() + assert sigs + assert all(sig.name == 'open' for sig in sigs) + + def test_call_signatures_empty_parentheses_pre_space(Script): s = dedent("""\ def f(a, b): @@ -150,22 +154,22 @@ def test_decorator_in_class(Script): def test_additional_brackets(Script): - assert_signature(Script, 'str((', 'str', 0) + assert_signature(Script, 'abs((', 'abs', 0) def test_unterminated_strings(Script): - assert_signature(Script, 'str(";', 'str', 0) + assert_signature(Script, 'abs(";', 'abs', 0) def test_whitespace_before_bracket(Script): - assert_signature(Script, 'str (', 'str', 0) - assert_signature(Script, 'str (";', 'str', 0) - assert_signature(Script, 'str\n(', None) + 
assert_signature(Script, 'abs (', 'abs', 0) + assert_signature(Script, 'abs (";', 'abs', 0) + assert_signature(Script, 'abs\n(', None) def test_brackets_in_string_literals(Script): - assert_signature(Script, 'str (" (', 'str', 0) - assert_signature(Script, 'str (" )', 'str', 0) + assert_signature(Script, 'abs (" (', 'abs', 0) + assert_signature(Script, 'abs (" )', 'abs', 0) def test_function_definitions_should_break(Script): @@ -173,8 +177,8 @@ Function definitions (and other tokens that cannot exist within call signatures) should break and not be able to return a call signature. """ - assert_signature(Script, 'str(\ndef x', 'str', 0) - assert not Script('str(\ndef x(): pass').call_signatures() + assert_signature(Script, 'abs(\ndef x', 'abs', 0) + assert not Script('abs(\ndef x(): pass').call_signatures() def test_flow_call(Script): @@ -211,7 +215,7 @@ def test_call_signature_on_module(Script): assert Script(s).call_signatures() == [] -def test_complex(Script): +def test_complex(Script, environment): s = """ def abc(a,b): pass @@ -229,7 +233,19 @@ def test_complex(Script): re.compile( return it * 2 """ - assert_signature(Script, s, 'compile', 0, line=4, column=27) + sig1, sig2 = sorted(Script(s, line=4, column=27).call_signatures(), key=lambda s: s.line) + assert sig1.name == sig2.name == 'compile' + assert sig1.index == sig2.index == 0 + func1, = sig1._name.infer() + func2, = sig2._name.infer() + + if environment.version_info.major == 3: + # Do these checks just for Python 3, I'm too lazy to deal with this + # legacy stuff. ~ dave. + assert get_call_signature(func1.tree_node) \ + == 'compile(pattern: AnyStr, flags: _FlagsType = ...) -> Pattern[AnyStr]' + assert get_call_signature(func2.tree_node) \ + == 'compile(pattern: Pattern[AnyStr], flags: _FlagsType = ...) ->\nPattern[AnyStr]' # jedi-vim #70 s = """def foo(""" @@ -246,19 +262,24 @@ def _params(Script, source, line=None, column=None): return signatures[0].params -def test_param_name(Script): - if not is_py33: - p = _params(Script, '''int(''') - # int is defined as: `int(x[, base])` - assert p[0].name == 'x' - # `int` docstring has been redefined: - # http://bugs.python.org/issue14783 - # TODO have multiple call signatures for int (like in the docstr) - #assert p[1].name == 'base' +def test_int_params(Script): + sig1, sig2 = Script('int(').call_signatures() + # int is defined as: `int(x[, base])` + assert len(sig1.params) == 2 + assert sig1.params[0].name == 'x' + assert sig1.params[1].name == 'base' + assert len(sig2.params) == 1 + assert sig2.params[0].name == 'x' - p = _params(Script, '''open(something,''') - assert p[0].name in ['file', 'name'] - assert p[1].name == 'mode' + +def test_param_name(Script): + sigs = Script('open(something,').call_signatures() + for sig in sigs: + # All of the signatures (in Python the function is overloaded) + # contain the same param names. + assert sig.params[0].name in ['file', 'name'] + assert sig.params[1].name == 'mode' + assert sig.params[2].name == 'buffering' def test_builtins(Script): @@ -286,18 +307,16 @@ def test_signature_is_definition(Script): # Now compare all the attributes that a CallSignature must also have. for attr_name in dir(definition): - dont_scan = ['defined_names', 'parent', 'goto_assignments', 'params'] + dont_scan = ['defined_names', 'parent', 'goto_assignments', 'infer', 'params'] if attr_name.startswith('_') or attr_name in dont_scan: continue - # Might trigger some deprecation warnings.
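For reference, a minimal sketch of the call-signature API the renamed tests above exercise, assuming the jedi version of this patch; the commented values mirror what the neighbouring tests assert, not captured output:

import jedi

# The cursor sits right after the opening bracket of ``abs(``.
sig, = jedi.Script('abs(').call_signatures()
print(sig.name)           # 'abs'
print(sig.index)          # 0, the cursor is on the first parameter
print(sig.bracket_start)  # (1, 3), line/column of the '('
print([p.name for p in sig.params])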
- with warnings.catch_warnings(record=True): - attribute = getattr(definition, attr_name) - signature_attribute = getattr(signature, attr_name) - if inspect.ismethod(attribute): - assert attribute() == signature_attribute() - else: - assert attribute == signature_attribute + attribute = getattr(definition, attr_name) + signature_attribute = getattr(signature, attr_name) + if inspect.ismethod(attribute): + assert attribute() == signature_attribute() + else: + assert attribute == signature_attribute def test_no_signature(Script): @@ -381,7 +400,7 @@ def test_bracket_start(Script): assert len(signatures) == 1 return signatures[0].bracket_start - assert bracket_start('str(') == (1, 3) + assert bracket_start('abs(') == (1, 3) def test_different_caller(Script): @@ -390,11 +409,11 @@ def test_different_caller(Script): index and then get the call signature of it. """ - assert_signature(Script, '[str][0](', 'str', 0) - assert_signature(Script, '[str][0]()', 'str', 0, column=len('[str][0](')) + assert_signature(Script, '[abs][0](', 'abs', 0) + assert_signature(Script, '[abs][0]()', 'abs', 0, column=len('[abs][0](')) - assert_signature(Script, '(str)(', 'str', 0) - assert_signature(Script, '(str)()', 'str', 0, column=len('(str)(')) + assert_signature(Script, '(abs)(', 'abs', 0) + assert_signature(Script, '(abs)()', 'abs', 0, column=len('(abs)(')) def test_in_function(Script): @@ -415,20 +434,28 @@ def test_lambda_params(Script): assert [p.name for p in sig.params] == ['x'] +CLASS_CODE = dedent('''\ +class X(): + def __init__(self, foo, bar): + self.foo = foo +''') + + def test_class_creation(Script): - code = dedent('''\ - class X(): - def __init__(self, foo, bar): - self.foo = foo - ''') - sig, = Script(code + 'X(').call_signatures() + + sig, = Script(CLASS_CODE + 'X(').call_signatures() assert sig.index == 0 assert sig.name == 'X' assert [p.name for p in sig.params] == ['foo', 'bar'] - sig, = Script(code + 'X.__init__(').call_signatures() + +def test_call_init_on_class(Script): + sig, = Script(CLASS_CODE + 'X.__init__(').call_signatures() assert [p.name for p in sig.params] == ['self', 'foo', 'bar'] - sig, = Script(code + 'X().__init__(').call_signatures() + + +def test_call_init_on_instance(Script): + sig, = Script(CLASS_CODE + 'X().__init__(').call_signatures() assert [p.name for p in sig.params] == ['foo', 'bar'] diff --git a/test/test_api/test_classes.py b/test/test_api/test_classes.py index d320966a..e2148018 100644 --- a/test/test_api/test_classes.py +++ b/test/test_api/test_classes.py @@ -8,7 +8,6 @@ import pytest import jedi from jedi import __doc__ as jedi_doc, names -from ..helpers import cwd_at from ..helpers import TestCase @@ -66,27 +65,31 @@ def test_basedefinition_type(Script, environment): 'generator', 'statement', 'import', 'param') -def test_basedefinition_type_import(Script): - def get_types(source, **kwargs): - return {t.type for t in Script(source, **kwargs).completions()} +@pytest.mark.parametrize( + ('src', 'expected_result', 'column'), [ + # import one level + ('import t', 'module', None), + ('import ', 'module', None), + ('import datetime; datetime', 'module', None), - # import one level - assert get_types('import t') == {'module'} - assert get_types('import ') == {'module'} - assert get_types('import datetime; datetime') == {'module'} + # from + ('from datetime import timedelta', 'class', None), + ('from datetime import timedelta; timedelta', 'class', None), + ('from json import tool', 'module', None), + ('from json import tool; tool', 'module', None), - # from - assert 
get_types('from datetime import timedelta') == {'class'} - assert get_types('from datetime import timedelta; timedelta') == {'class'} - assert get_types('from json import tool') == {'module'} - assert get_types('from json import tool; tool') == {'module'} + # import two levels + ('import json.tool; json', 'module', None), + ('import json.tool; json.tool', 'module', None), + ('import json.tool; json.tool.main', 'function', None), + ('import json.tool', 'module', None), + ('import json.tool', 'module', 9), + ] - # import two levels - assert get_types('import json.tool; json') == {'module'} - assert get_types('import json.tool; json.tool') == {'module'} - assert get_types('import json.tool; json.tool.main') == {'function'} - assert get_types('import json.tool') == {'module'} - assert get_types('import json.tool', column=9) == {'module'} +) +def test_basedefinition_type_import(Script, src, expected_result, column): + types = {t.type for t in Script(src, column=column).completions()} + assert types == {expected_result} def test_function_call_signature_in_doc(Script): @@ -99,7 +102,7 @@ def test_function_call_signature_in_doc(Script): def test_param_docstring(): - param = jedi.names("def test(parameter): pass")[1] + param = jedi.names("def test(parameter): pass", all_scopes=True)[1] assert param.name == 'parameter' assert param.docstring() == '' @@ -111,13 +114,14 @@ def test_class_call_signature(Script): pass Foo""").goto_definitions() doc = defs[0].docstring() - assert "Foo(self, x, y=1, z='a')" in str(doc) + assert doc == "Foo(x, y=1, z='a')" def test_position_none_if_builtin(Script): gotos = Script('import sys; sys.path').goto_assignments() - assert gotos[0].line is None - assert gotos[0].column is None + assert gotos[0].in_builtin_module() + assert gotos[0].line is not None + assert gotos[0].column is not None def test_completion_docstring(Script, jedi_path): @@ -174,9 +178,9 @@ def test_hashlib_params(Script, environment): if environment.version_info < (3,): pytest.skip() - script = Script(source='from hashlib import ', line=1, column=20) - c = script.completions() - assert c[2].params + script = Script(source='from hashlib import sha256') + c, = script.completions() + assert [p.name for p in c.params] == ['arg'] def test_signature_params(Script): @@ -291,10 +295,10 @@ def test_parent_on_completion(Script): def test_type(Script): for c in Script('a = [str()]; a[0].').completions(): - if c.name == '__class__': + if c.name == '__class__' and False: # TODO fix. 
assert c.type == 'class' else: - assert c.type in ('function', 'instance') + assert c.type in ('function', 'statement') for c in Script('list.').completions(): assert c.type diff --git a/test/test_api/test_completion.py b/test/test_api/test_completion.py index a40095b9..01aeddf3 100644 --- a/test/test_api/test_completion.py +++ b/test/test_api/test_completion.py @@ -29,7 +29,7 @@ def test_in_empty_space(Script): comps = Script(code, 3, 7).completions() self, = [c for c in comps if c.name == 'self'] assert self.name == 'self' - def_, = self._goto_definitions() + def_, = self.infer() assert def_.name == 'X' @@ -132,7 +132,6 @@ def test_async(Script, environment): hey = 3 ho''' ) - print(code) comps = Script(code, column=4).completions() names = [c.name for c in comps] assert 'foo' in names diff --git a/test/test_api/test_defined_names.py b/test/test_api/test_defined_names.py index 81b7c5b6..3386df3f 100644 --- a/test/test_api/test_defined_names.py +++ b/test/test_api/test_defined_names.py @@ -79,6 +79,59 @@ class TestDefinedNames(TestCase): self.assert_definition_names(subsubdefs, ['L3', 'f']) self.assert_definition_names(subsubdefs[0].defined_names(), ['f']) + def test_class_fields_with_all_scopes_false(self): + definitions = self.check_defined_names(""" + from module import f + g = f(f) + class C: + h = g + + def foo(x=a): + bar = x + return bar + """, ['f', 'g', 'C', 'foo']) + C_subdefs = definitions[-2].defined_names() + foo_subdefs = definitions[-1].defined_names() + self.assert_definition_names(C_subdefs, ['h']) + self.assert_definition_names(foo_subdefs, ['x', 'bar']) + + def test_async_stmt_with_all_scopes_false(self): + definitions = self.check_defined_names(""" + from module import f + import asyncio + + g = f(f) + class C: + h = g + def __init__(self): + pass + + async def __aenter__(self): + pass + + def foo(x=a): + bar = x + return bar + + async def async_foo(duration): + async def wait(): + await asyncio.sleep(100) + for i in range(duration//100): + await wait() + return duration//100*100 + + async with C() as cinst: + d = cinst + """, ['f', 'asyncio', 'g', 'C', 'foo', 'async_foo', 'cinst', 'd']) + C_subdefs = definitions[3].defined_names() + foo_subdefs = definitions[4].defined_names() + async_foo_subdefs = definitions[5].defined_names() + cinst_subdefs = definitions[6].defined_names() + self.assert_definition_names(C_subdefs, ['h', '__init__', '__aenter__']) + self.assert_definition_names(foo_subdefs, ['x', 'bar']) + self.assert_definition_names(async_foo_subdefs, ['duration', 'wait', 'i']) + # We treat d as a name outside `async with` block + self.assert_definition_names(cinst_subdefs, []) def test_follow_imports(environment): # github issue #344 @@ -96,3 +149,24 @@ def test_names_twice(environment): defs = names(source=source, environment=environment) assert defs[0].defined_names() == [] + + +def test_simple_name(environment): + defs = names('foo', references=True, environment=environment) + assert not defs[0]._name.infer() + + +def test_no_error(environment): + code = dedent(""" + def foo(a, b): + if a == 10: + if b is None: + print("foo") + a = 20 + """) + func_name, = names(code) + a, b, a20 = func_name.defined_names() + assert a.name == 'a' + assert b.name == 'b' + assert a20.name == 'a' + assert a20.goto_assignments() == [a20] diff --git a/test/test_api/test_environment.py b/test/test_api/test_environment.py index 395e2f5e..2d774a8e 100644 --- a/test/test_api/test_environment.py +++ b/test/test_api/test_environment.py @@ -1,6 +1,5 @@ import os import sys -from 
contextlib import contextmanager import pytest @@ -8,7 +7,8 @@ import jedi from jedi._compatibility import py_version from jedi.api.environment import get_default_environment, find_virtualenvs, \ InvalidPythonEnvironment, find_system_environments, \ - get_system_environment, create_environment + get_system_environment, create_environment, InterpreterEnvironment, \ + get_cached_default_environment def test_sys_path(): @@ -44,7 +44,7 @@ def test_versions(version): def test_load_module(evaluator): access_path = evaluator.compiled_subprocess.load_module( - name=u'math', + dotted_name=u'math', sys_path=evaluator.get_sys_path() ) name, access_handle = access_path.accesses[0] @@ -55,7 +55,10 @@ def test_load_module(evaluator): access_handle.py__mro__() -def test_error_in_environment(evaluator, Script): +def test_error_in_environment(evaluator, Script, environment): + if isinstance(environment, InterpreterEnvironment): + pytest.skip("We don't catch these errors at the moment.") + # Provoke an error to show how Jedi can recover from it. with pytest.raises(jedi.InternalError): evaluator.compiled_subprocess._test_raise_error(KeyboardInterrupt) @@ -72,9 +75,11 @@ def test_stdout_in_subprocess(evaluator, Script): Script('1').goto_definitions() -def test_killed_subprocess(evaluator, Script): +def test_killed_subprocess(evaluator, Script, environment): + if isinstance(environment, InterpreterEnvironment): + pytest.skip("We cannot kill our own process") # Just kill the subprocess. - evaluator.compiled_subprocess._compiled_subprocess._process.kill() + evaluator.compiled_subprocess._compiled_subprocess._get_process().kill() # Since the process was terminated (and nobody knows about it) the first # Jedi call fails. with pytest.raises(jedi.InternalError): @@ -85,34 +90,22 @@ def test_killed_subprocess(evaluator, Script): assert def_.name == 'str' -@contextmanager -def set_environment_variable(name, value): - tmp = os.environ.get(name) - try: - os.environ[name] = value - yield - finally: - if tmp is None: - del os.environ[name] - else: - os.environ[name] = tmp - - -def test_not_existing_virtualenv(): +def test_not_existing_virtualenv(monkeypatch): """Should not match the path that was given""" path = '/foo/bar/jedi_baz' - with set_environment_variable('VIRTUAL_ENV', path): - assert get_default_environment().executable != path + monkeypatch.setenv('VIRTUAL_ENV', path) + assert get_default_environment().executable != path -def test_working_venv(venv_path): - with set_environment_variable('VIRTUAL_ENV', venv_path): - assert get_default_environment().path == venv_path +def test_working_venv(venv_path, monkeypatch): + monkeypatch.setenv('VIRTUAL_ENV', venv_path) + assert get_default_environment().path == venv_path def test_scanning_venvs(venv_path): parent_dir = os.path.dirname(venv_path) - assert any(venv.path == venv_path for venv in find_virtualenvs([parent_dir])) + assert any(venv.path == venv_path + for venv in find_virtualenvs([parent_dir])) def test_create_environment_venv_path(venv_path): @@ -123,3 +116,40 @@ def test_create_environment_venv_path(venv_path): def test_create_environment_executable(): environment = create_environment(sys.executable) assert environment.executable == sys.executable + + +def test_get_default_environment_from_env_does_not_use_safe(tmpdir, monkeypatch): + fake_python = os.path.join(str(tmpdir), 'fake_python') + with open(fake_python, 'w') as f: + f.write('') + + def _get_subprocess(self): + if self._start_executable != fake_python: + raise RuntimeError('Should not get called!') + 
self.executable = fake_python + self.path = 'fake' + + monkeypatch.setattr('jedi.api.environment.Environment._get_subprocess', + _get_subprocess) + + monkeypatch.setenv('VIRTUAL_ENV', fake_python) + env = get_default_environment() + assert env.path == 'fake' + + +@pytest.mark.parametrize('virtualenv', ['', 'fufuuuuu', sys.prefix]) +def test_get_default_environment_when_embedded(monkeypatch, virtualenv): + # When using Python embedded, sometimes the executable is not a Python + # executable. + executable_name = 'RANDOM_EXE' + monkeypatch.setattr(sys, 'executable', executable_name) + monkeypatch.setenv('VIRTUAL_ENV', virtualenv) + env = get_default_environment() + assert env.executable != executable_name + + +def test_changing_venv(venv_path, monkeypatch): + monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', venv_path) + get_cached_default_environment() + monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', sys.executable) + assert get_cached_default_environment().executable == sys.executable diff --git a/test/test_api/test_full_name.py b/test/test_api/test_full_name.py index 7c952119..9956e6be 100644 --- a/test/test_api/test_full_name.py +++ b/test/test_api/test_full_name.py @@ -18,7 +18,7 @@ import textwrap import pytest import jedi -from ..helpers import TestCase, cwd_at +from ..helpers import TestCase class MixinTestFullName(object): @@ -52,7 +52,7 @@ class TestFullNameWithGotoDefinitions(MixinTestFullName, TestCase): self.check(""" import re any_re = re.compile('.*') - any_re""", '_sre.SRE_Pattern') + any_re""", 'typing.Pattern') def test_from_import(self): self.check('from os import path', 'os.path') @@ -111,5 +111,4 @@ def test_os_path(Script): def test_os_issues(Script): """Issue #873""" - c, = Script('import os\nos.nt''').completions() - assert c.full_name == 'nt' + assert [c.name for c in Script('import os\nos.nt''').completions()] == ['nt'] diff --git a/test/test_api/test_interpreter.py b/test/test_api/test_interpreter.py index b1ca24bb..ac99515b 100644 --- a/test/test_api/test_interpreter.py +++ b/test/test_api/test_interpreter.py @@ -4,7 +4,7 @@ Tests of ``jedi.api.Interpreter``. 
import pytest import jedi -from jedi._compatibility import is_py3, py_version, is_py35 +from jedi._compatibility import is_py3, py_version from jedi.evaluate.compiled import mixed @@ -244,8 +244,8 @@ def test_completion_params(): script = jedi.Interpreter('foo', [locals()]) c, = script.completions() assert [p.name for p in c.params] == ['a', 'b'] - assert c.params[0]._goto_definitions() == [] - t, = c.params[1]._goto_definitions() + assert c.params[0].infer() == [] + t, = c.params[1].infer() assert t.name == 'int' @@ -258,9 +258,9 @@ def test_completion_param_annotations(): script = jedi.Interpreter('foo', [locals()]) c, = script.completions() a, b, c = c.params - assert a._goto_definitions() == [] - assert [d.name for d in b._goto_definitions()] == ['str'] - assert {d.name for d in c._goto_definitions()} == {'int', 'float'} + assert a.infer() == [] + assert [d.name for d in b.infer()] == ['str'] + assert {d.name for d in c.infer()} == {'int', 'float'} def test_keyword_argument(): @@ -340,7 +340,7 @@ def test_dir_magic_method(): assert 'bar' in names foo = [c for c in completions if c.name == 'foo'][0] - assert foo._goto_definitions() == [] + assert foo.infer() == [] def test_name_not_findable(): @@ -356,3 +356,9 @@ def test_name_not_findable(): setattr(X, 'NOT_FINDABLE', X.hidden) assert jedi.Interpreter("X.NOT_FINDA", [locals()]).completions() + + +def test_sys_path_docstring(): # Was an issue in #1298 + import jedi + s = jedi.Interpreter("from sys import path\npath", line=2, column=4, namespaces=[locals()]) + s.completions()[0].docstring() diff --git a/test/test_api/test_project.py b/test/test_api/test_project.py index a0f4e4d5..328333d2 100644 --- a/test/test_api/test_project.py +++ b/test/test_api/test_project.py @@ -12,4 +12,4 @@ def test_django_default_project(Script): ) c, = script.completions() assert c.name == "SomeModel" - assert script._project._django is True + assert script._evaluator.project._django is True diff --git a/test/test_api/test_settings.py b/test/test_api/test_settings.py index 522bf82d..24ae05fe 100644 --- a/test/test_api/test_settings.py +++ b/test/test_api/test_settings.py @@ -21,3 +21,14 @@ def test_add_dynamic_mods(Script): result = script.goto_definitions() assert len(result) == 1 assert result[0].description == 'class int' + + +def test_add_bracket_after_function(monkeypatch, Script): + settings = api.settings + monkeypatch.setattr(settings, 'add_bracket_after_function', True) + script = Script('''\ +def foo(): + pass +foo''') + completions = script.completions() + assert completions[0].complete == '(' diff --git a/test/test_api/test_unicode.py b/test/test_api/test_unicode.py index c74cb686..7fd28bb2 100644 --- a/test/test_api/test_unicode.py +++ b/test/test_api/test_unicode.py @@ -72,3 +72,9 @@ def test_wrong_encoding(Script, tmpdir): c, = Script('import x; x.foo', sys_path=[tmpdir.strpath]).completions() assert c.name == 'foobar' + + +def test_encoding_parameter(Script): + name = u('hö') + s = Script(name.encode('latin-1'), encoding='latin-1') + assert s._module_node.get_code() == name diff --git a/test/test_api/test_usages.py b/test/test_api/test_usages.py index 94deb0f9..245951b7 100644 --- a/test/test_api/test_usages.py +++ b/test/test_api/test_usages.py @@ -1,3 +1,14 @@ def test_import_usage(Script): s = Script("from .. 
import foo", line=1, column=18, path="foo.py") assert [usage.line for usage in s.usages()] == [1] + + +def test_exclude_builtin_modules(Script): + def get(include): + return [(d.line, d.column) for d in Script(source, column=8).usages(include_builtins=include)] + source = '''import sys\nprint(sys.path)''' + places = get(include=True) + assert len(places) > 2 # Includes stubs + + places = get(include=False) + assert places == [(1, 7), (2, 6)] diff --git a/test/test_evaluate/test_compiled.py b/test/test_evaluate/test_compiled.py index a0b3ca81..b80b9add 100644 --- a/test/test_evaluate/test_compiled.py +++ b/test/test_evaluate/test_compiled.py @@ -1,29 +1,37 @@ from textwrap import dedent +import pytest + from jedi.evaluate import compiled -from jedi.evaluate.context import instance +from jedi.evaluate.helpers import execute_evaluated +from jedi.evaluate.gradual.conversion import stub_to_actual_context_set -def test_simple(evaluator): +def test_simple(evaluator, environment): obj = compiled.create_simple_object(evaluator, u'_str_') upper, = obj.py__getattribute__(u'upper') - objs = list(upper.execute_evaluated()) + objs = list(execute_evaluated(upper)) assert len(objs) == 1 - assert isinstance(objs[0], instance.CompiledInstance) + if environment.version_info.major == 2: + expected = 'unicode' + else: + expected = 'str' + assert objs[0].name.string_name == expected -def test_fake_loading(evaluator): - builtin = compiled.get_special_object(evaluator, u'BUILTINS') - string, = builtin.py__getattribute__(u'str') - from_name = compiled.context.create_from_name(evaluator, string, u'__init__') +def test_builtin_loading(evaluator): + string, = evaluator.builtins_module.py__getattribute__(u'str') + from_name, = string.py__getattribute__(u'__init__') assert from_name.tree_node + assert not from_name.py__doc__() # It's a stub -def test_fake_docstr(evaluator): +def test_next_docstr(evaluator): next_ = compiled.builtin_from_name(evaluator, u'next') - assert next_.py__doc__() assert next_.tree_node is not None - assert next_.py__doc__() == next.__doc__ + assert next_.py__doc__() == '' # It's a stub + for non_stub in stub_to_actual_context_set(next_): + assert non_stub.py__doc__() == next.__doc__ def test_parse_function_doc_illegal_docstr(): @@ -42,7 +50,7 @@ def test_doc(evaluator): """ str_ = compiled.create_simple_object(evaluator, u'') # Equals `''.__getnewargs__` - obj = compiled.create_from_name(evaluator, str_, u'__getnewargs__') + obj, = str_.py__getattribute__(u'__getnewargs__') assert obj.py__doc__() == '' @@ -70,18 +78,26 @@ def test_method_completion(Script, environment): foo = Foo() foo.bar.__func__''') - if environment.version_info.major > 2: - result = [] - else: - result = ['__func__'] - assert [c.name for c in Script(code).completions()] == result + assert [c.name for c in Script(code).completions()] == ['__func__'] def test_time_docstring(Script): import time comp, = Script('import time\ntime.sleep').completions() - assert comp.docstring() == time.sleep.__doc__ + assert comp.docstring(raw=True) == time.sleep.__doc__ + expected = 'sleep(secs: float) -> None\n\n' + time.sleep.__doc__ + assert comp.docstring() == expected -def test_dict_values(Script): +def test_dict_values(Script, environment): + if environment.version_info.major == 2: + # It looks like typeshed for Python 2 returns Any. 
+ pytest.skip() assert Script('import sys\nsys.modules["alshdb;lasdhf"]').goto_definitions() + + +def test_getitem_on_none(Script): + script = Script('None[1j]') + assert not script.goto_definitions() + issue, = script._evaluator.analysis + assert issue.name == 'type-error-not-subscriptable' diff --git a/test/test_evaluate/test_context.py b/test/test_evaluate/test_context.py index 116b64c8..bc301c09 100644 --- a/test/test_evaluate/test_context.py +++ b/test/test_evaluate/test_context.py @@ -3,5 +3,5 @@ def test_module_attributes(Script): assert def_.name == '__name__' assert def_.line is None assert def_.column is None - str_, = def_._goto_definitions() + str_, = def_.infer() assert str_.name == 'str' diff --git a/test/test_evaluate/test_docstring.py b/test/test_evaluate/test_docstring.py index 3dfbc7af..676f4e8f 100644 --- a/test/test_evaluate/test_docstring.py +++ b/test/test_evaluate/test_docstring.py @@ -35,7 +35,20 @@ def test_class_doc(Script): class TestClass(): '''Docstring of `TestClass`.''' TestClass""").goto_definitions() - assert defs[0].docstring() == 'Docstring of `TestClass`.' + + expected = 'Docstring of `TestClass`.' + assert defs[0].docstring(raw=True) == expected + assert defs[0].docstring() == 'TestClass()\n\n' + expected + + +def test_class_doc_with_init(Script): + d, = Script(""" + class TestClass(): + '''Docstring''' + def __init__(self, foo, bar=3): pass + TestClass""").goto_definitions() + + assert d.docstring() == 'TestClass(foo, bar=3)\n\nDocstring' def test_instance_doc(Script): @@ -142,6 +155,16 @@ def test_docstring_keyword(Script): assert 'assert' in completions[0].docstring() +def test_docstring_params_formatting(Script): + defs = Script(""" + def func(param1, + param2, + param3): + pass + func""").goto_definitions() + assert defs[0].docstring() == 'func(param1, param2, param3)' + + # ---- Numpy Style Tests --- @pytest.mark.skipif(numpydoc_unavailable, @@ -349,7 +372,6 @@ def test_numpy_returns(): x.d''' ) names = [c.name for c in jedi.Script(s).completions()] - print(names) assert 'diagonal' in names @@ -362,5 +384,4 @@ def test_numpy_comp_returns(): x.d''' ) names = [c.name for c in jedi.Script(s).completions()] - print(names) assert 'diagonal' in names diff --git a/test/test_evaluate/test_gradual/test_stub_loading.py b/test/test_evaluate/test_gradual/test_stub_loading.py new file mode 100644 index 00000000..40dcffa8 --- /dev/null +++ b/test/test_evaluate/test_gradual/test_stub_loading.py @@ -0,0 +1,27 @@ +from functools import partial +from test.helpers import get_example_dir +from jedi.api.project import Project + +import pytest + + +@pytest.fixture +def ScriptInStubFolder(Script): + path = get_example_dir('stub_packages') + project = Project(path, sys_path=[path], smart_sys_path=False) + return partial(Script, _project=project) + + +@pytest.mark.parametrize( + ('code', 'expected'), [ + ('from no_python import foo', ['int']), + ('from with_python import stub_only', ['str']), + ('from with_python import python_only', ['int']), + ('from with_python import both', ['int']), + ('from with_python import something_random', []), + ('from with_python.module import in_sub_module', ['int']), + ] +) +def test_find_stubs_infer(ScriptInStubFolder, code, expected): + defs = ScriptInStubFolder(code).goto_definitions() + assert [d.name for d in defs] == expected diff --git a/test/test_evaluate/test_gradual/test_typeshed.py b/test/test_evaluate/test_gradual/test_typeshed.py new file mode 100644 index 00000000..8dc5fc0f --- /dev/null +++ 
b/test/test_evaluate/test_gradual/test_typeshed.py @@ -0,0 +1,236 @@ +import os + +import pytest +from parso.utils import PythonVersionInfo + +from jedi.evaluate.gradual import typeshed, stub_context +from jedi.evaluate.context import TreeInstance, BoundMethod, FunctionContext, \ + MethodContext, ClassContext + +TYPESHED_PYTHON3 = os.path.join(typeshed.TYPESHED_PATH, 'stdlib', '3') + + +def test_get_typeshed_directories(): + def get_dirs(version_info): + return { + d.replace(typeshed.TYPESHED_PATH, '').lstrip(os.path.sep) + for d in typeshed._get_typeshed_directories(version_info) + } + + def transform(set_): + return {x.replace('/', os.path.sep) for x in set_} + + dirs = get_dirs(PythonVersionInfo(2, 7)) + assert dirs == transform({'stdlib/2and3', 'stdlib/2', 'third_party/2and3', 'third_party/2'}) + + dirs = get_dirs(PythonVersionInfo(3, 4)) + assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'third_party/2and3', 'third_party/3'}) + + dirs = get_dirs(PythonVersionInfo(3, 5)) + assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'stdlib/3.5', + 'third_party/2and3', 'third_party/3', 'third_party/3.5'}) + + dirs = get_dirs(PythonVersionInfo(3, 6)) + assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'stdlib/3.5', + 'stdlib/3.6', 'third_party/2and3', + 'third_party/3', 'third_party/3.5', 'third_party/3.6'}) + + +def test_get_stub_files(): + def get_map(version_info): + return typeshed._create_stub_map(version_info) + + map_ = typeshed._create_stub_map(TYPESHED_PYTHON3) + assert map_['functools'] == os.path.join(TYPESHED_PYTHON3, 'functools.pyi') + + +def test_function(Script, environment): + code = 'import threading; threading.current_thread' + def_, = Script(code).goto_definitions() + context = def_._name._context + assert isinstance(context, FunctionContext), context + + def_, = Script(code + '()').goto_definitions() + context = def_._name._context + assert isinstance(context, TreeInstance) + + def_, = Script('import threading; threading.Thread').goto_definitions() + assert isinstance(def_._name._context, ClassContext), def_ + + +def test_keywords_variable(Script): + code = 'import keyword; keyword.kwlist' + for seq in Script(code).goto_definitions(): + assert seq.name == 'Sequence' + # This points towards the typeshed implementation + stub_seq, = seq.goto_stubs() + assert typeshed.TYPESHED_PATH in stub_seq.module_path + + +def test_class(Script): + def_, = Script('import threading; threading.Thread').goto_definitions() + context = def_._name._context + assert isinstance(context, ClassContext), context + + +def test_instance(Script): + def_, = Script('import threading; threading.Thread()').goto_definitions() + context = def_._name._context + assert isinstance(context, TreeInstance) + + +def test_class_function(Script): + def_, = Script('import threading; threading.Thread.getName').goto_definitions() + context = def_._name._context + assert isinstance(context, MethodContext), context + + +def test_method(Script): + code = 'import threading; threading.Thread().getName' + def_, = Script(code).goto_definitions() + context = def_._name._context + assert isinstance(context, BoundMethod), context + assert isinstance(context._wrapped_context, MethodContext), context + + def_, = Script(code + '()').goto_definitions() + context = def_._name._context + assert isinstance(context, TreeInstance) + assert context.class_context.py__name__() == 'str' + + +def test_sys_exc_info(Script): + code = 'import sys; sys.exc_info()' + none, def_ = Script(code + '[1]').goto_definitions() + # It's an optional. 
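The version layering that `test_get_typeshed_directories` pins down can be restated standalone. A rough sketch of the rule the assertions encode (not jedi's implementation; it assumes minor-version directories start at 3.5):

import os

def expected_typeshed_dirs(major, minor):
    # Shared dirs, the major-version dirs, plus one pair of dirs per minor
    # version from 3.5 up to the requested version.
    dirs = ['stdlib/2and3', 'third_party/2and3',
            'stdlib/%d' % major, 'third_party/%d' % major]
    if major == 3:
        for m in range(5, minor + 1):
            dirs += ['stdlib/3.%d' % m, 'third_party/3.%d' % m]
    return {d.replace('/', os.path.sep) for d in dirs}

# Reproduces the set asserted above for Python 2.7:
assert expected_typeshed_dirs(2, 7) == {
    os.path.join('stdlib', '2and3'), os.path.join('stdlib', '2'),
    os.path.join('third_party', '2and3'), os.path.join('third_party', '2')}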
+ assert def_.name == 'BaseException' + assert def_.type == 'instance' + assert none.name == 'NoneType' + + none, def_ = Script(code + '[0]').goto_definitions() + assert def_.name == 'BaseException' + assert def_.type == 'class' + + +def test_sys_getwindowsversion(Script, environment): + # This should only exist on Windows, but type inference should happen + # everywhere. + definitions = Script('import sys; sys.getwindowsversion().major').goto_definitions() + if environment.version_info.major == 2: + assert not definitions + else: + def_, = definitions + assert def_.name == 'int' + + +def test_sys_hexversion(Script): + script = Script('import sys; sys.hexversion') + def_, = script.completions() + assert isinstance(def_._name, stub_context._StubName), def_._name + assert typeshed.TYPESHED_PATH in def_.module_path + def_, = script.goto_definitions() + assert def_.name == 'int' + + +def test_math(Script): + def_, = Script('import math; math.acos()').goto_definitions() + assert def_.name == 'float' + context = def_._name._context + assert context + + +def test_type_var(Script): + def_, = Script('import typing; T = typing.TypeVar("T1")').goto_definitions() + assert def_.name == 'TypeVar' + assert def_.description == 'TypeVar = object()' + + +@pytest.mark.parametrize( + 'code, full_name', ( + ('import math', 'math'), + ('from math import cos', 'math.cos') + ) +) +def test_math_is_stub(Script, code, full_name): + s = Script(code) + cos, = s.goto_definitions() + wanted = os.path.join('typeshed', 'stdlib', '2and3', 'math.pyi') + assert cos.module_path.endswith(wanted) + assert cos.is_stub() is True + assert cos.goto_stubs() == [cos] + assert cos.full_name == full_name + + cos, = s.goto_assignments() + assert cos.module_path.endswith(wanted) + assert cos.goto_stubs() == [cos] + assert cos.is_stub() is True + assert cos.full_name == full_name + + +def test_goto_stubs(Script): + s = Script('import os; os') + os_module, = s.goto_definitions() + assert os_module.full_name == 'os' + assert os_module.is_stub() is False + stub, = os_module.goto_stubs() + assert stub.is_stub() is True + + os_module, = s.goto_assignments() + + +def _assert_is_same(d1, d2): + assert d1.name == d2.name + assert d1.module_path == d2.module_path + assert d1.line == d2.line + assert d1.column == d2.column + + +@pytest.mark.parametrize('type_', ['goto', 'infer']) +@pytest.mark.parametrize( + 'code', [ + 'import os; os.walk', + 'from collections import Counter; Counter', + 'from collections import Counter; Counter()', + 'from collections import Counter; Counter.most_common', + ]) +def test_goto_stubs_on_itself(Script, code, type_): + """ + If goto_stubs is used on an identifier in e.g. the stdlib, we should goto + the stub of it. + """ + s = Script(code) + if type_ == 'infer': + def_, = s.goto_definitions() + else: + def_, = s.goto_assignments(follow_imports=True) + stub, = def_.goto_stubs() + + script_on_source = Script( + path=def_.module_path, + line=def_.line, + column=def_.column + ) + if type_ == 'infer': + definition, = script_on_source.goto_definitions() + else: + definition, = script_on_source.goto_assignments() + same_stub, = definition.goto_stubs() + _assert_is_same(same_stub, stub) + _assert_is_same(definition, def_) + assert same_stub.module_path != def_.module_path + + # And the reverse. 
+ script_on_stub = Script( + path=same_stub.module_path, + line=same_stub.line, + column=same_stub.column + ) + + if type_ == 'infer': + same_definition, = script_on_stub.goto_definitions() + same_definition2, = same_stub.infer() + else: + same_definition, = script_on_stub.goto_assignments() + same_definition2, = same_stub.goto_assignments() + + _assert_is_same(same_definition, definition) + _assert_is_same(same_definition, same_definition2) diff --git a/test/test_evaluate/test_imports.py b/test/test_evaluate/test_imports.py index fc0a790a..eead0d49 100644 --- a/test/test_evaluate/test_imports.py +++ b/test/test_evaluate/test_imports.py @@ -6,61 +6,106 @@ Tests". import os import pytest +from parso.file_io import FileIO from jedi._compatibility import find_module_py33, find_module from jedi.evaluate import compiled -from ..helpers import cwd_at +from jedi.evaluate import imports +from jedi.api.project import Project +from jedi.evaluate.gradual.conversion import stub_to_actual_context_set +from ..helpers import cwd_at, get_example_dir, test_dir, root_dir + +THIS_DIR = os.path.dirname(__file__) @pytest.mark.skipif('sys.version_info < (3,3)') def test_find_module_py33(): """Needs to work like the old find_module.""" - assert find_module_py33('_io') == (None, '_io', False) + assert find_module_py33('_io') == (None, False) + with pytest.raises(ImportError): + assert find_module_py33('_DOESNTEXIST_') == (None, None) def test_find_module_package(): - file, path, is_package = find_module('json') - assert file is None - assert path.endswith('json') + file_io, is_package = find_module('json') + assert file_io.path.endswith(os.path.join('json', '__init__.py')) assert is_package is True def test_find_module_not_package(): - file, path, is_package = find_module('io') - assert file is not None - assert path.endswith('io.py') + file_io, is_package = find_module('io') + assert file_io.path.endswith('io.py') assert is_package is False +pkg_zip_path = os.path.join(os.path.dirname(__file__), 'zipped_imports/pkg.zip') + + def test_find_module_package_zipped(Script, evaluator, environment): - path = os.path.join(os.path.dirname(__file__), 'zipped_imports/pkg.zip') - sys_path = environment.get_sys_path() + [path] + sys_path = environment.get_sys_path() + [pkg_zip_path] script = Script('import pkg; pkg.mod', sys_path=sys_path) assert len(script.completions()) == 1 - code, path, is_package = evaluator.compiled_subprocess.get_module_info( + file_io, is_package = evaluator.compiled_subprocess.get_module_info( sys_path=sys_path, string=u'pkg', full_name=u'pkg' ) - assert code is not None - assert path.endswith('pkg.zip') + assert file_io is not None + assert file_io.path.endswith(os.path.join('pkg.zip', 'pkg', '__init__.py')) + assert file_io._zip_path.endswith('pkg.zip') assert is_package is True +@pytest.mark.parametrize( + 'code, file, package, path', [ + ('import pkg', '__init__.py', 'pkg', 'pkg'), + ('import pkg', '__init__.py', 'pkg', 'pkg'), + + ('from pkg import module', 'module.py', 'pkg', None), + ('from pkg.module', 'module.py', 'pkg', None), + + ('from pkg import nested', os.path.join('nested', '__init__.py'), + 'pkg.nested', os.path.join('pkg', 'nested')), + ('from pkg.nested', os.path.join('nested', '__init__.py'), + 'pkg.nested', os.path.join('pkg', 'nested')), + + ('from pkg.nested import nested_module', + os.path.join('nested', 'nested_module.py'), 'pkg.nested', None), + ('from pkg.nested.nested_module', + os.path.join('nested', 'nested_module.py'), 'pkg.nested', None), + + ('from pkg.namespace 
import namespace_module', + os.path.join('namespace', 'namespace_module.py'), 'pkg.namespace', None), + ('from pkg.namespace.namespace_module', + os.path.join('namespace', 'namespace_module.py'), 'pkg.namespace', None), + ] + +) +def test_correct_zip_package_behavior(Script, evaluator, environment, code, + file, package, path, skip_python2): + sys_path = environment.get_sys_path() + [pkg_zip_path] + pkg, = Script(code, sys_path=sys_path).goto_definitions() + context, = pkg._name.infer() + assert context.py__file__() == os.path.join(pkg_zip_path, 'pkg', file) + assert '.'.join(context.py__package__()) == package + assert context.is_package is (path is not None) + if path is not None: + assert context.py__path__() == [os.path.join(pkg_zip_path, path)] + + def test_find_module_not_package_zipped(Script, evaluator, environment): path = os.path.join(os.path.dirname(__file__), 'zipped_imports/not_pkg.zip') sys_path = environment.get_sys_path() + [path] script = Script('import not_pkg; not_pkg.val', sys_path=sys_path) assert len(script.completions()) == 1 - code, path, is_package = evaluator.compiled_subprocess.get_module_info( + file_io, is_package = evaluator.compiled_subprocess.get_module_info( sys_path=sys_path, string=u'not_pkg', full_name=u'not_pkg' ) - assert code is not None - assert path.endswith('not_pkg.zip') + assert file_io.path.endswith(os.path.join('not_pkg.zip', 'not_pkg.py')) assert is_package is False @@ -87,10 +132,10 @@ def test_import_not_in_sys_path(Script): ("from flask.ext.", "bar"), ("from flask.ext.", "baz"), ("from flask.ext.", "moo"), - pytest.mark.xfail(("import flask.ext.foo; flask.ext.foo.", "Foo")), - pytest.mark.xfail(("import flask.ext.bar; flask.ext.bar.", "Foo")), - pytest.mark.xfail(("import flask.ext.baz; flask.ext.baz.", "Foo")), - pytest.mark.xfail(("import flask.ext.moo; flask.ext.moo.", "Foo")), + pytest.param("import flask.ext.foo; flask.ext.foo.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.bar; flask.ext.bar.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.baz; flask.ext.baz.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.moo; flask.ext.moo.", "Foo", marks=pytest.mark.xfail), ]) def test_flask_ext(Script, code, name): """flask.ext.foo is really imported from flaskext.foo or flask_foo. @@ -132,13 +177,13 @@ def test_cache_works_with_sys_path_param(Script, tmpdir): def test_import_completion_docstring(Script): import abc s = Script('"""test"""\nimport ab') - completions = s.completions() - assert len(completions) == 1 - assert completions[0].docstring(fast=False) == abc.__doc__ + abc_completions = [c for c in s.completions() if c.name == 'abc'] + assert len(abc_completions) == 1 + assert abc_completions[0].docstring(fast=False) == abc.__doc__ # However for performance reasons not all modules are loaded and the # docstring is empty in this case. - assert completions[0].docstring() == '' + assert abc_completions[0].docstring() == '' def test_goto_definition_on_import(Script): @@ -249,5 +294,146 @@ def test_compiled_import_none(monkeypatch, Script): """ Related to #1079. An import might somehow fail and return None. 
""" + script = Script('import sys') monkeypatch.setattr(compiled, 'load_module', lambda *args, **kwargs: None) - assert not Script('import sys').goto_definitions() + def_, = script.goto_definitions() + assert def_.type == 'module' + context, = def_._name.infer() + assert not stub_to_actual_context_set(context) + + +@pytest.mark.parametrize( + ('path', 'is_package', 'goal'), [ + (os.path.join(THIS_DIR, 'test_docstring.py'), False, ('ok', 'lala', 'test_imports')), + (os.path.join(THIS_DIR, '__init__.py'), True, ('ok', 'lala', 'x', 'test_imports')), + ] +) +def test_get_modules_containing_name(evaluator, path, goal, is_package): + module = imports._load_python_module( + evaluator, + FileIO(path), + import_names=('ok', 'lala', 'x'), + is_package=is_package, + ) + assert module + input_module, found_module = imports.get_modules_containing_name( + evaluator, + [module], + 'string_that_only_exists_here' + ) + assert input_module is module + assert found_module.string_names == goal + + +@pytest.mark.parametrize( + 'path', ('api/whatever/test_this.py', 'api/whatever/file')) +@pytest.mark.parametrize('empty_sys_path', (False, True)) +def test_relative_imports_with_multiple_similar_directories(Script, path, empty_sys_path): + dir = get_example_dir('issue1209') + if empty_sys_path: + project = Project(dir, sys_path=(), smart_sys_path=False) + else: + project = Project(dir) + script = Script( + "from . ", + path=os.path.join(dir, path), + _project=project, + ) + name, import_ = script.completions() + assert import_.name == 'import' + assert name.name == 'api_test1' + + +def test_relative_imports_with_outside_paths(Script): + dir = get_example_dir('issue1209') + project = Project(dir, sys_path=[], smart_sys_path=False) + script = Script( + "from ...", + path=os.path.join(dir, 'api/whatever/test_this.py'), + _project=project, + ) + assert [c.name for c in script.completions()] == ['api', 'import', 'whatever'] + + script = Script( + "from " + '.' * 100, + path=os.path.join(dir, 'api/whatever/test_this.py'), + _project=project, + ) + assert [c.name for c in script.completions()] == ['import'] + + +@cwd_at('test/examples/issue1209/api/whatever/') +def test_relative_imports_without_path(Script): + project = Project('.', sys_path=[], smart_sys_path=False) + script = Script("from . ", _project=project) + assert [c.name for c in script.completions()] == ['api_test1', 'import'] + + script = Script("from .. ", _project=project) + assert [c.name for c in script.completions()] == ['import', 'whatever'] + + script = Script("from ... ", _project=project) + assert [c.name for c in script.completions()] == ['api', 'import', 'whatever'] + + +def test_relative_import_out_of_file_system(Script): + script = Script("from " + '.' * 100) + import_, = script.completions() + assert import_.name == 'import' + + script = Script("from " + '.' 
* 100 + 'abc import ABCMeta') + assert not script.goto_definitions() + assert not script.completions() + + +@pytest.mark.parametrize( + 'level, directory, project_path, result', [ + (1, '/a/b/c', '/a', (['b', 'c'], '/a')), + (2, '/a/b/c', '/a', (['b'], '/a')), + (3, '/a/b/c', '/a', ([], '/a')), + (4, '/a/b/c', '/a', (None, '/')), + (5, '/a/b/c', '/a', (None, None)), + (1, '/', '/', ([], '/')), + (2, '/', '/', (None, None)), + (1, '/a/b', '/a/b/c', (None, '/a/b')), + (2, '/a/b', '/a/b/c', (None, '/a')), + (3, '/a/b', '/a/b/c', (None, '/')), + ] +) +def test_level_to_import_path(level, directory, project_path, result): + assert imports._level_to_base_import_path(project_path, directory, level) == result + + +def test_import_name_calculation(Script): + s = Script(path=os.path.join(test_dir, 'completion', 'isinstance.py')) + m = s._get_module() + assert m.string_names == ('test', 'completion', 'isinstance') + + +@pytest.mark.parametrize('name', ('builtins', 'typing')) +def test_pre_defined_imports_module(Script, environment, name): + if environment.version_info.major < 3 and name == 'builtins': + name = '__builtin__' + + path = os.path.join(root_dir, name + '.py') + module = Script('', path=path)._get_module() + assert module.string_names == (name,) + + assert module.evaluator.builtins_module.py__file__() != path + assert module.evaluator.typing_module.py__file__() != path + + +@pytest.mark.parametrize('name', ('builtins', 'typing')) +def test_import_needed_modules_by_jedi(Script, environment, tmpdir, name): + if environment.version_info.major < 3 and name == 'builtins': + name = '__builtin__' + + module_path = tmpdir.join(name + '.py') + module_path.write('int = ...') + script = Script( + 'import ' + name, + path=tmpdir.join('something.py').strpath, + sys_path=[tmpdir.strpath] + environment.get_sys_path(), + ) + module, = script.goto_definitions() + assert module._evaluator.builtins_module.py__file__() != module_path + assert module._evaluator.typing_module.py__file__() != module_path diff --git a/test/test_evaluate/test_literals.py b/test/test_evaluate/test_literals.py index 8ca9a9fc..dd72f8e8 100644 --- a/test/test_evaluate/test_literals.py +++ b/test/test_evaluate/test_literals.py @@ -1,12 +1,12 @@ import pytest -from jedi.evaluate.context import CompiledInstance +from jedi.evaluate.context import TreeInstance def _eval_literal(Script, code, is_fstring=False): def_, = Script(code).goto_definitions() if is_fstring: assert def_.name == 'str' - assert isinstance(def_._name._context, CompiledInstance) + assert isinstance(def_._name._context, TreeInstance) return '' else: return def_._name._context.get_safe_value() diff --git a/test/test_evaluate/test_namespace_package.py b/test/test_evaluate/test_namespace_package.py index 4500c297..1b156583 100644 --- a/test/test_evaluate/test_namespace_package.py +++ b/test/test_evaluate/test_namespace_package.py @@ -1,6 +1,9 @@ from os.path import dirname, join import pytest +import py + +from ..helpers import get_example_dir SYS_PATH = [join(dirname(__file__), d) @@ -72,3 +75,22 @@ def test_nested_namespace_package(Script): result = script.goto_definitions() assert len(result) == 1 + + +def test_relative_import(Script, environment, tmpdir): + """ + Attempt a relative import in a very simple namespace package. + """ + if environment.version_info < (3, 4): + pytest.skip() + + directory = get_example_dir('namespace_package_relative_import') + # Need to copy the content in a directory where there's no __init__.py. 
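The `test_level_to_import_path` table above follows a single rule: every level past the first walks one directory up, and an import path is only recoverable while the resulting base directory still lies inside the project. A standalone sketch of that rule (POSIX paths; not jedi's implementation):

import os

def level_to_base_import_path(project_path, directory, level):
    base = directory
    for _ in range(level - 1):
        if base == os.path.dirname(base):
            return None, None  # walked past the filesystem root
        base = os.path.dirname(base)
    project = project_path.rstrip(os.sep)
    if (base + os.sep).startswith(project + os.sep):
        # Inside the project: the remaining segments form the import path.
        rel = base[len(project):].strip(os.sep)
        return (rel.split(os.sep) if rel else []), project_path
    return None, base  # outside the project: no import path

assert level_to_base_import_path('/a', '/a/b/c', 2) == (['b'], '/a')
assert level_to_base_import_path('/a', '/a/b/c', 4) == (None, '/')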
+ py.path.local(directory).copy(tmpdir) + file_path = join(tmpdir.strpath, "rel1.py") + script = Script(path=file_path, line=1) + d, = script.goto_definitions() + assert d.name == 'int' + d, = script.goto_assignments() + assert d.name == 'name' + assert d.module_name == 'rel2' diff --git a/test/test_evaluate/test_precedence.py b/test/test_evaluate/test_precedence.py index 722d2d05..2e97119e 100644 --- a/test/test_evaluate/test_precedence.py +++ b/test/test_evaluate/test_precedence.py @@ -4,9 +4,10 @@ import pytest @pytest.mark.parametrize('source', [ - '1 == 1', - '1.0 == 1', - '... == ...' + pytest.param('1 == 1'), + pytest.param('1.0 == 1'), + # Unfortunately for now not possible, because it's a typeshed object. + pytest.param('... == ...', marks=pytest.mark.xfail), ]) def test_equals(Script, environment, source): if environment.version_info.major < 3: diff --git a/test/test_evaluate/test_pyc.py b/test/test_evaluate/test_pyc.py index d31254cc..37449572 100644 --- a/test/test_evaluate/test_pyc.py +++ b/test/test_evaluate/test_pyc.py @@ -10,8 +10,10 @@ import os import shutil import sys +import pytest + import jedi -from ..helpers import cwd_at +from jedi.api.environment import SameEnvironment, InterpreterEnvironment SRC = """class Foo: @@ -22,35 +24,50 @@ class Bar: """ -def generate_pyc(): - os.mkdir("dummy_package") - with open("dummy_package/__init__.py", 'w'): +@pytest.fixture +def pyc_project_path(tmpdir): + path = tmpdir.strpath + dummy_package_path = os.path.join(path, "dummy_package") + os.mkdir(dummy_package_path) + with open(os.path.join(dummy_package_path, "__init__.py"), 'w'): pass - with open("dummy_package/dummy.py", 'w') as f: + + dummy_path = os.path.join(dummy_package_path, 'dummy.py') + with open(dummy_path, 'w') as f: f.write(SRC) import compileall - compileall.compile_file("dummy_package/dummy.py") - os.remove("dummy_package/dummy.py") + compileall.compile_file(dummy_path) + os.remove(dummy_path) - if sys.version_info[0] == 3: + if sys.version_info.major == 3: # Python3 specific: # To import pyc modules, we must move them out of the __pycache__ # directory and rename them to remove ".cpython-%s%d" # see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files - for f in os.listdir("dummy_package/__pycache__"): + pycache = os.path.join(dummy_package_path, "__pycache__") + for f in os.listdir(pycache): dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "") - dst = os.path.join("dummy_package", dst) - shutil.copy(os.path.join("dummy_package/__pycache__", f), dst) + dst = os.path.join(dummy_package_path, dst) + shutil.copy(os.path.join(pycache, f), dst) + try: + yield path + finally: + shutil.rmtree(path) -@cwd_at('test/test_evaluate') -def test_pyc(Script): +def test_pyc(pyc_project_path, environment): """ The list of completion must be greater than 2. """ - try: - generate_pyc() - s = jedi.Script("from dummy_package import dummy; dummy.", path='blub.py') - assert len(s.completions()) >= 2 - finally: - shutil.rmtree("dummy_package") + path = os.path.join(pyc_project_path, 'blub.py') + if not isinstance(environment, InterpreterEnvironment): + # We are using the same version for pyc completions here, because it + # was compiled in that version. However with interpreter environments + # we also have the same version and it's easier to debug. 
+ environment = SameEnvironment() + s = jedi.Script( + "from dummy_package import dummy; dummy.", + path=path, + environment=environment) + assert len(s.completions()) >= 2 diff --git a/test/test_evaluate/test_representation.py b/test/test_evaluate/test_representation.py index 61130230..11b528d3 100644 --- a/test/test_evaluate/test_representation.py +++ b/test/test_evaluate/test_representation.py @@ -1,5 +1,7 @@ from textwrap import dedent +from jedi.evaluate.helpers import execute_evaluated + def get_definition_and_evaluator(Script, source): first, = Script(dedent(source)).goto_definitions() @@ -20,8 +22,8 @@ def test_function_execution(Script): # Now just use the internals of the result (easiest way to get a fully # usable function). # Should return the same result both times. - assert len(func.execute_evaluated()) == 1 - assert len(func.execute_evaluated()) == 1 + assert len(execute_evaluated(func)) == 1 + assert len(execute_evaluated(func)) == 1 def test_class_mro(Script): diff --git a/test/test_evaluate/test_stdlib.py b/test/test_evaluate/test_stdlib.py index d3f0e620..58898ead 100644 --- a/test/test_evaluate/test_stdlib.py +++ b/test/test_evaluate/test_stdlib.py @@ -7,14 +7,6 @@ from textwrap import dedent import pytest -# The namedtuple is different for different Python2.7 versions. Some versions -# are missing the attribute `_class_template`. -@pytest.fixture(autouse=True) -def skipping(environment): - if environment.version_info.major < 3: - pytest.skip() - - @pytest.mark.parametrize(['letter', 'expected'], [ ('n', ['name']), ('s', ['smart']), @@ -86,3 +78,26 @@ def test_namedtuple_goto_definitions(Script): assert d1.get_line_code() == "class Foo(tuple):\n" assert d1.module_path is None + + +def test_re_sub(Script, environment): + """ + This whole test was taken out of completion/stdlib.py, because of the + version differences. + """ + def run(code): + defs = Script(code).goto_definitions() + return {d.name for d in defs} + + names = run("import re; re.sub('a', 'a', 'f')") + if environment.version_info.major == 2: + assert names == {'str', 'unicode'} + else: + assert names == {'str', 'bytes'} + + # This param is missing because of overloading. + names = run("import re; re.sub('a', 'a')") + if environment.version_info.major == 2: + assert names == {'str', 'unicode'} + else: + assert names == {'str', 'bytes'} diff --git a/test/test_evaluate/test_sys_path.py b/test/test_evaluate/test_sys_path.py index 776f8d04..5885e112 100644 --- a/test/test_evaluate/test_sys_path.py +++ b/test/test_evaluate/test_sys_path.py @@ -3,6 +3,8 @@ from glob import glob import sys import shutil +import pytest + from jedi.evaluate import sys_path from jedi.api.environment import create_environment @@ -29,10 +31,8 @@ def test_paths_from_assignment(Script): def test_venv_and_pths(venv_path): pjoin = os.path.join - virtualenv = create_environment(venv_path) - CUR_DIR = os.path.dirname(__file__) - site_pkg_path = pjoin(virtualenv.path, 'lib') + site_pkg_path = pjoin(venv_path, 'lib') if os.name == 'nt': site_pkg_path = pjoin(site_pkg_path, 'site-packages') else: @@ -40,6 +40,7 @@ def test_venv_and_pths(venv_path): shutil.rmtree(site_pkg_path) shutil.copytree(pjoin(CUR_DIR, 'sample_venvs', 'pth_directory'), site_pkg_path) + virtualenv = create_environment(venv_path) venv_paths = virtualenv.get_sys_path() ETALON = [ @@ -60,3 +61,45 @@ def test_venv_and_pths(venv_path): # Ensure that none of venv dirs leaked to the interpreter.
diff --git a/test/test_evaluate/test_sys_path.py b/test/test_evaluate/test_sys_path.py
index 776f8d04..5885e112 100644
--- a/test/test_evaluate/test_sys_path.py
+++ b/test/test_evaluate/test_sys_path.py
@@ -3,6 +3,8 @@ from glob import glob
 import sys
 import shutil
 
+import pytest
+
 from jedi.evaluate import sys_path
 from jedi.api.environment import create_environment
 
@@ -29,10 +31,8 @@ def test_paths_from_assignment(Script):
 def test_venv_and_pths(venv_path):
     pjoin = os.path.join
 
-    virtualenv = create_environment(venv_path)
-
     CUR_DIR = os.path.dirname(__file__)
-    site_pkg_path = pjoin(virtualenv.path, 'lib')
+    site_pkg_path = pjoin(venv_path, 'lib')
     if os.name == 'nt':
         site_pkg_path = pjoin(site_pkg_path, 'site-packages')
     else:
@@ -40,6 +40,7 @@ def test_venv_and_pths(venv_path):
     shutil.rmtree(site_pkg_path)
     shutil.copytree(pjoin(CUR_DIR, 'sample_venvs', 'pth_directory'), site_pkg_path)
 
+    virtualenv = create_environment(venv_path)
     venv_paths = virtualenv.get_sys_path()
 
     ETALON = [
@@ -60,3 +61,45 @@ def test_venv_and_pths(venv_path):
 
     # Ensure that none of venv dirs leaked to the interpreter.
     assert not set(sys.path).intersection(ETALON)
+
+
+_s = ['/a', '/b', '/c/d/']
+
+
+@pytest.mark.parametrize(
+    'sys_path_, module_path, expected, is_package', [
+        (_s, '/a/b', ('b',), False),
+        (_s, '/a/b/c', ('b', 'c'), False),
+        (_s, '/a/b.py', ('b',), False),
+        (_s, '/a/b/c.py', ('b', 'c'), False),
+        (_s, '/x/b.py', None, False),
+        (_s, '/c/d/x.py', ('x',), False),
+        (_s, '/c/d/x.py', ('x',), False),
+        (_s, '/c/d/x/y.py', ('x', 'y'), False),
+        # If dots are in there they also resolve. These are obviously illegal
+        # in Python, but Jedi can handle them. Give the user a bit more
+        # freedom; they will have to correct it eventually.
+        (_s, '/a/b.c.py', ('b.c',), False),
+        (_s, '/a/b.d/foo.bar.py', ('b.d', 'foo.bar'), False),
+
+        (_s, '/a/.py', None, False),
+        (_s, '/a/c/.py', None, False),
+
+        (['/foo'], '/foo/bar/__init__.py', ('bar',), True),
+        (['/foo'], '/foo/bar/baz/__init__.py', ('bar', 'baz'), True),
+        (['/foo'], '/foo/bar.so', ('bar',), False),
+        (['/foo'], '/foo/bar/__init__.so', ('bar',), True),
+        (['/foo'], '/x/bar.py', None, False),
+        (['/foo'], '/foo/bar.xyz', ('bar.xyz',), False),
+
+        (['/foo', '/foo/bar'], '/foo/bar/baz', ('baz',), False),
+        (['/foo/bar', '/foo'], '/foo/bar/baz', ('baz',), False),
+
+        (['/'], '/bar/baz.py', ('bar', 'baz',), False),
+    ])
+def test_transform_path_to_dotted(sys_path_, module_path, expected, is_package):
+    # transform_path_to_dotted expects normalized absolute paths.
+    sys_path_ = [os.path.abspath(path) for path in sys_path_]
+    module_path = os.path.abspath(module_path)
+    assert sys_path.transform_path_to_dotted(sys_path_, module_path) \
+        == (expected, is_package)
diff --git a/test/test_evaluate/zipped_imports/pkg.zip b/test/test_evaluate/zipped_imports/pkg.zip
index ec8eac4d..0344f746 100644
Binary files a/test/test_evaluate/zipped_imports/pkg.zip and b/test/test_evaluate/zipped_imports/pkg.zip differ
diff --git a/test/test_parso_integration/test_parser_utils.py b/test/test_parso_integration/test_parser_utils.py
index 6cfd702b..c085710c 100644
--- a/test/test_parso_integration/test_parser_utils.py
+++ b/test/test_parso_integration/test_parser_utils.py
@@ -84,5 +84,3 @@ def test_get_call_signature(code, call_signature):
     if node.type == 'simple_stmt':
         node = node.children[0]
     assert parser_utils.get_call_signature(node) == call_signature
-
-    assert parser_utils.get_doc_with_call_signature(node) == (call_signature + '\n\n')
diff --git a/test/test_settings.py b/test/test_settings.py
new file mode 100644
index 00000000..1519f0b3
--- /dev/null
+++ b/test/test_settings.py
@@ -0,0 +1,23 @@
+import pytest
+
+from jedi import settings
+from jedi.evaluate.names import ContextName
+from jedi.evaluate.compiled import CompiledContextName
+from jedi.evaluate.gradual.typeshed import StubModuleContext
+
+
+@pytest.fixture()
+def auto_import_json(monkeypatch):
+    monkeypatch.setattr(settings, 'auto_import_modules', ['json'])
+
+
+def test_base_auto_import_modules(auto_import_json, Script):
+    loads, = Script('import json; json.loads').goto_definitions()
+    assert isinstance(loads._name, ContextName)
+    context, = loads._name.infer()
+    assert isinstance(context.parent_context, StubModuleContext)
+
+
+def test_auto_import_modules_imports(auto_import_json, Script):
+    main, = Script('from json import tool; tool.main').goto_definitions()
+    assert isinstance(main._name, CompiledContextName)
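The new test_settings.py exercises jedi.settings.auto_import_modules, which
tells Jedi to actually import the listed modules in the running interpreter
instead of analyzing their source statically. A hedged usage sketch (the module
choice mirrors the tests; any importable module would do):

    import jedi
    from jedi import settings

    # Listed modules are loaded for real, which helps with modules that do a
    # lot of dynamic work at import time.
    settings.auto_import_modules = ['json']

    loads, = jedi.Script('import json; json.loads').goto_definitions()
    print(loads.name)  # 'loads'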
diff --git a/test/test_speed.py b/test/test_speed.py
index f3fa7c7b..ba5784ac 100644
--- a/test/test_speed.py
+++ b/test/test_speed.py
@@ -30,7 +30,7 @@ def _check_speed(time_per_run, number=4, run_warm=True):
     return decorated
 
 
-@_check_speed(0.3)
+@_check_speed(0.5)
 def test_os_path_join(Script):
     s = "from posixpath import join; join('', '')."
     assert len(Script(s).completions()) > 10  # is a str completion
diff --git a/test/test_utils.py b/test/test_utils.py
index 2b4fd5a2..17328a36 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -76,7 +76,11 @@ class TestSetupReadline(unittest.TestCase):
         goal = {s + el for el in dir(os)}
         # There are minor differences, e.g. the dir doesn't include deleted
         # items as well as items that are not only available on linux.
-        assert len(set(self.completions(s)).symmetric_difference(goal)) < 20
+        difference = set(self.completions(s)).symmetric_difference(goal)
+        difference = {x for x in difference if not x.startswith('from os import _')}
+        # There are quite a few differences, because both Windows and Linux
+        # (posix and nt) libraries are included.
+        assert len(difference) < 38
 
     @cwd_at('test')
     def test_local_import(self):
diff --git a/tox.ini b/tox.ini
index d5910619..d7eb9144 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,16 @@
 [tox]
-envlist = py27, py33, py34, py35, py36
+envlist = py27, py34, py35, py36, py37
 [testenv]
+extras = testing
 deps =
-    pytest>=2.3.5, < 3.7
-    pytest-cache
-# docopt for sith doctests
-    docopt
-# coloroma for colored debug output
-    colorama
+# for testing the typing module
+    py27: typing
+    py34: typing
+# numpydoc for typing scipy stack
+    numpydoc
+    cov: coverage
 # Overwrite the parso version (only used sometimes).
-    git+https://github.com/davidhalter/parso.git
-# -rrequirements.txt
+#    git+https://github.com/davidhalter/parso.git
 passenv = JEDI_TEST_ENVIRONMENT
 setenv =
 # https://github.com/tomchristie/django-rest-framework/issues/1957
@@ -19,42 +19,13 @@ setenv =
 # To test Jedi in different versions than the same Python version, set a
 # different test environment.
     env27: JEDI_TEST_ENVIRONMENT=27
-    env33: JEDI_TEST_ENVIRONMENT=33
     env34: JEDI_TEST_ENVIRONMENT=34
     env35: JEDI_TEST_ENVIRONMENT=35
     env36: JEDI_TEST_ENVIRONMENT=36
     env37: JEDI_TEST_ENVIRONMENT=37
 commands = pytest {posargs}
-[testenv:py27]
-deps =
-# for testing the typing module
-    typing
-# numpydoc for typing scipy stack
-    numpydoc
-    {[testenv]deps}
-[testenv:py33]
-deps =
-    typing
-    {[testenv]deps}
-[testenv:py34]
-deps =
-    typing
-    numpydoc
-    {[testenv]deps}
-[testenv:py35]
-deps =
-    numpydoc
-    {[testenv]deps}
-[testenv:py36]
-deps =
-    numpydoc
-    {[testenv]deps}
 [testenv:cov]
-deps =
-    coverage
-    numpydoc
-    {[testenv]deps}
 commands =
     coverage run --source jedi -m pytest
     coverage report
diff --git a/travis_install.sh b/travis_install.sh
index 1142ac3a..2c5eb944 100755
--- a/travis_install.sh
+++ b/travis_install.sh
@@ -8,10 +8,6 @@ set -e
 sudo chown root: /opt/python/3.6/bin/python
 sudo chown root: /opt/python/3.6.3/bin/python
 
-if [[ $JEDI_TEST_ENVIRONMENT == "33" ]]; then
-    VERSION=3.3
-    DOWNLOAD=1
-fi
 if [[ $JEDI_TEST_ENVIRONMENT == "35" ]]; then
     VERSION=3.5
     DOWNLOAD=1