diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 5aa12521..61bbe79d 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -15,6 +15,8 @@ Changelog
 - Completion for "proxies" works now. These are classes that have a
   ``__getattr__(self, name)`` method that does a ``return getattr(x, name)``.
 - Understanding of Pytest fixtures.
+- Tensorflow, Numpy and Pandas completions should now be about 4-10x faster
+  after loading them initially.
 - Big **Script API Changes**:
   - The line and column parameters of ``jedi.Script`` are now deprecated
   - ``completions`` deprecated, use ``complete`` instead
diff --git a/test/conftest.py b/test/conftest.py
index 76ba6e1a..b023f185 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,16 +1,17 @@
 import os
-import re
 import subprocess
+from itertools import count
 
 import pytest
 
 from . import helpers
 from . import run
 from . import refactor
-
-import jedi
 from jedi.api.environment import InterpreterEnvironment
 from jedi.inference.compiled.value import create_from_access_path
+from jedi.inference.imports import _load_python_module
+from jedi.file_io import KnownContentFileIO
+from jedi.inference.base_value import ValueSet
 
 
 def pytest_addoption(parser):
@@ -144,3 +145,16 @@ def create_compiled_object(inference_state):
         inference_state,
         inference_state.compiled_subprocess.create_simple_object(obj)
     )
+
+
+@pytest.fixture
+def module_injector():
+    counter = count()
+
+    def module_injector(inference_state, names, code):
+        assert isinstance(names, tuple)
+        file_io = KnownContentFileIO('/foo/bar/module-injector-%s.py' % next(counter), code)
+        v = _load_python_module(inference_state, file_io, names)
+        inference_state.module_cache.add(names, ValueSet([v]))
+
+    return module_injector
diff --git a/test/test_api/test_completion.py b/test/test_api/test_completion.py
index 8aa47b0c..db078293 100644
--- a/test/test_api/test_completion.py
+++ b/test/test_api/test_completion.py
@@ -392,3 +392,33 @@ def test_fuzzy_match():
 
 def test_ellipsis_completion(Script):
     assert Script('...').complete() == []
+
+
+def test_completion_cache(Script, module_injector):
+    """
+    For some modules like numpy, tensorflow or pandas we cache docstrings and
+    types to avoid them slowing us down, because they are huge.
+    """
+    script = Script('import numpy; numpy.foo')
+    module_injector(script._inference_state, ('numpy',), 'def foo(a): "doc"')
+    c, = script.complete()
+    assert c.name == 'foo'
+    assert c.type == 'function'
+    assert c.docstring() == 'foo(a)\n\ndoc'
+
+    code = dedent('''\
+        class foo:
+            'doc2'
+            def __init__(self):
+                pass
+        ''')
+    script = Script('import numpy; numpy.foo')
+    module_injector(script._inference_state, ('numpy',), code)
+    # The output should still be the same, because the module is cached.
+    c, = script.complete()
+    assert c.name == 'foo'
+    assert c.type == 'function'
+    assert c.docstring() == 'foo(a)\n\ndoc'
+    cls, = c.infer()
+    assert cls.type == 'class'
+    assert cls.docstring() == 'foo()\n\ndoc2'
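
A minimal usage sketch (not part of the diff above): another test built on the
new module_injector fixture could inject fake source for any top-level module
name and complete against it. The test name and the injected pandas source
below are hypothetical; the fixture signature is the one from conftest.py.

    def test_injected_module_completion(Script, module_injector):
        # Complete "pandas.ba" against injected source instead of the real
        # pandas package; the fixture registers the code in jedi's module
        # cache under the given names tuple.
        script = Script('import pandas; pandas.ba')
        module_injector(script._inference_state, ('pandas',), 'bar = 3')
        completion, = script.complete()
        assert completion.name == 'bar'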