Mirror of https://github.com/davidhalter/jedi.git (synced 2025-12-06 22:14:27 +08:00)

Add a completion cache for numpy/tensorflow, fixes #1116
@@ -19,6 +19,7 @@ from jedi.inference.gradual.typeshed import StubModuleValue
 from jedi.inference.gradual.conversion import convert_names, convert_values
 from jedi.inference.base_value import ValueSet
 from jedi.api.keywords import KeywordName
+from jedi.api import completion_cache


 def _sort_names_by_start_pos(names):
@@ -241,19 +242,25 @@ class BaseDefinition(object):
         """
         if isinstance(self._name, ImportName) and fast:
             return ''
-        doc = self._name.py__doc__()
+        doc = self._get_docstring()
         if raw:
             return doc

-        signature_text = '\n'.join(
-            signature.to_string()
-            for signature in self._get_signatures(for_docstring=True)
-        )
+        signature_text = self._get_docstring_signature()
         if signature_text and doc:
             return signature_text + '\n\n' + doc
         else:
             return signature_text + doc

+    def _get_docstring(self):
+        return self._name.py__doc__()
+
+    def _get_docstring_signature(self):
+        return '\n'.join(
+            signature.to_string()
+            for signature in self._get_signatures(for_docstring=True)
+        )
+
     @property
     def description(self):
         """
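For orientation, the refactor above keeps the same composition rule in docstring(): signature text first, then a blank line, then the docstring, with the separator dropped when either part is empty. A minimal sketch with invented placeholder strings, not jedi's actual output:

    # Sketch only: the two input strings are made-up stand-ins.
    signature_text = "open(file, mode='r')"   # what _get_docstring_signature() might return
    doc = "Open file and return a stream."    # what _get_docstring() might return

    if signature_text and doc:
        result = signature_text + '\n\n' + doc   # signature, blank line, docstring
    else:
        result = signature_text + doc            # one part is empty, no separator needed
    print(result)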
@@ -485,7 +492,7 @@ class BaseDefinition(object):
         return ''.join(lines[start_index:index + after + 1])

     def _get_signatures(self, for_docstring=False):
-        if for_docstring and self.type == 'statement' and not self.is_stub():
+        if for_docstring and self._name.api_type == 'statement' and not self.is_stub():
             # For docstrings we don't resolve signatures if they are simple
             # statements and not stubs. This is a speed optimization.
             return []
@@ -508,12 +515,14 @@ class Completion(BaseDefinition):
     `Completion` objects are returned from :meth:`api.Script.complete`. They
     provide additional information about a completion.
     """
-    def __init__(self, inference_state, name, stack, like_name_length, is_fuzzy):
+    def __init__(self, inference_state, name, stack, like_name_length,
+                 is_fuzzy, cached_name=None):
         super(Completion, self).__init__(inference_state, name)

         self._like_name_length = like_name_length
         self._stack = stack
         self._is_fuzzy = is_fuzzy
+        self._cached_name = cached_name

         # Completion objects with the same Completion name (which means
         # duplicate items in the completion)
@@ -575,8 +584,47 @@ class Completion(BaseDefinition):
             # In this case we can just resolve the like name, because we
             # wouldn't load like > 100 Python modules anymore.
             fast = False

         return super(Completion, self).docstring(raw=raw, fast=fast)

+    def _get_docstring(self):
+        if self._cached_name is not None:
+            return completion_cache.get_docstring(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+        return super(Completion, self)._get_docstring()
+
+    def _get_docstring_signature(self):
+        if self._cached_name is not None:
+            return completion_cache.get_docstring_signature(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+        return super(Completion, self)._get_docstring_signature()
+
+    def _get_cache(self):
+        typ = super(Completion, self).type
+        return (
+            typ,
+            super(Completion, self)._get_docstring_signature(),
+            super(Completion, self)._get_docstring(),
+        )
+
+    @property
+    def type(self):
+        # Purely a speed optimization.
+        if self._cached_name is not None:
+            return completion_cache.get_type(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+
+        return super(Completion, self).type
+
     def __repr__(self):
         return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
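All three Completion accessors hand the cache the same `lambda: self._get_cache()` callback, so whichever of `type`, the docstring signature, or `docstring()` is requested first computes the whole (type, signature, docstring) triple once, and the other two become plain dictionary lookups. A hedged usage sketch from the API side, assuming an installed numpy and the `Script.complete(line, column)` call referenced in the class docstring above:

    import jedi

    code = "import numpy\nnumpy."
    completions = jedi.Script(code).complete(line=2, column=len("numpy."))
    for c in completions:
        c.type          # first access per attribute computes and stores the triple
        c.docstring()   # already cached now, so this is a plain dict lookup

    # Any later Completion for a numpy attribute in the same process reuses the
    # completion_cache entries when .type or .docstring() is accessed again.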
@@ -40,7 +40,7 @@ def get_signature_param_names(signatures):
                 yield ParamNameWithEquals(p._name)


-def filter_names(inference_state, completion_names, stack, like_name, fuzzy):
+def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
     comp_dct = {}
     if settings.case_insensitive_completion:
         like_name = like_name.lower()
@@ -59,6 +59,7 @@ def filter_names(inference_state, completion_names, stack, like_name, fuzzy):
                 stack,
                 len(like_name),
                 is_fuzzy=fuzzy,
+                cached_name=cached_name,
             )
             k = (new.name, new.complete)  # key
             if k in comp_dct and settings.no_completion_duplicates:
@@ -142,10 +143,11 @@ class Completion:
             prefixed_completions = self._complete_in_string(start_leaf, string)
             return prefixed_completions

-        completion_names = self._complete_python(leaf)
+        cached_name, completion_names = self._complete_python(leaf)

        completions = list(filter_names(self._inference_state, completion_names,
-                                        self.stack, self._like_name, self._fuzzy))
+                                        self.stack, self._like_name,
+                                        self._fuzzy, cached_name=cached_name))

        return (
            # Removing duplicates mostly to remove False/True/None duplicates.
@@ -176,6 +178,7 @@ class Completion:
             self._original_position[0],
             self._original_position[1] - len(self._like_name)
         )
+        cached_name = None

         try:
             self.stack = stack = helpers.get_stack_at_position(
@@ -186,10 +189,10 @@ class Completion:
             if value == '.':
                 # After ErrorLeaf's that are dots, we will not do any
                 # completions since this probably just confuses the user.
-                return []
+                return cached_name, []

             # If we don't have a value, just use global completion.
-            return self._complete_global_scope()
+            return cached_name, self._complete_global_scope()

         allowed_transitions = \
             list(stack._allowed_transition_names_and_token_types())
@@ -245,7 +248,7 @@ class Completion:
             if nodes and nodes[-1] in ('as', 'def', 'class'):
                 # No completions for ``with x as foo`` and ``import x as foo``.
                 # Also true for defining names as a class or function.
-                return list(self._complete_inherited(is_function=True))
+                return cached_name, list(self._complete_inherited(is_function=True))
             elif "import_stmt" in nonterminals:
                 level, names = parse_dotted_names(nodes, "import_from" in nonterminals)

@@ -257,7 +260,8 @@ class Completion:
                 )
             elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
                 dot = self._module_node.get_leaf_for_position(self._position)
-                completion_names += self._complete_trailer(dot.get_previous_leaf())
+                cached_name, n = self._complete_trailer(dot.get_previous_leaf())
+                completion_names += n
             elif self._is_parameter_completion():
                 completion_names += self._complete_params(leaf)
             else:
@@ -276,7 +280,7 @@ class Completion:
             signatures = self._signatures_callback(*self._position)
             completion_names += get_signature_param_names(signatures)

-        return completion_names
+        return cached_name, completion_names

     def _is_parameter_completion(self):
         tos = self.stack[-1]
@@ -337,13 +341,26 @@ class Completion:
         inferred_context = self._module_context.create_context(previous_leaf)
         values = infer_call_of_leaf(inferred_context, previous_leaf)
         debug.dbg('trailer completion values: %s', values, color='MAGENTA')
-        return self._complete_trailer_for_values(values)

+        # The cached name simply exists to make speed optimizations for certain
+        # modules.
+        cached_name = None
+        if len(values) == 1:
+            v, = values
+            if v.is_module():
+                if len(v.string_names) == 1:
+                    module_name = v.string_names[0]
+                    if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
+                        cached_name = module_name
+
+        return cached_name, self._complete_trailer_for_values(values)

     def _complete_trailer_for_values(self, values):
-        user_value = get_user_context(self._module_context, self._position)
+        user_context = get_user_context(self._module_context, self._position)

         completion_names = []
         for value in values:
-            for filter in value.get_filters(origin_scope=user_value.tree_node):
+            for filter in value.get_filters(origin_scope=user_context.tree_node):
                 completion_names += filter.values()

             if not value.is_stub() and isinstance(value, TreeInstance):
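Note that a cached_name is only set when the trailer infers to exactly one value, that value is a module, and its `string_names` has length 1 with a name from the hard-coded whitelist; `numpy.` qualifies, while a submodule such as `numpy.random.` or an ambiguous trailer does not. A stripped-down sketch of that gate, using a hypothetical stand-in class rather than jedi's real value objects:

    class FakeModule:
        # Hypothetical stand-in for an inferred jedi module value.
        def __init__(self, *string_names):
            self.string_names = string_names

        def is_module(self):
            return True

    def pick_cached_name(values):
        # Mirrors the condition in _complete_trailer above.
        if len(values) == 1:
            v, = values
            if v.is_module() and len(v.string_names) == 1 and \
                    v.string_names[0] in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
                return v.string_names[0]
        return None

    assert pick_cached_name([FakeModule('numpy')]) == 'numpy'
    assert pick_cached_name([FakeModule('numpy', 'random')]) is None   # submodule: not cached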
@@ -352,7 +369,7 @@ class Completion:
         python_values = convert_values(values)
         for c in python_values:
             if c not in values:
-                for filter in c.get_filters(origin_scope=user_value.tree_node):
+                for filter in c.get_filters(origin_scope=user_context.tree_node):
                     completion_names += filter.values()
         return completion_names
jedi/api/completion_cache.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+_cache = {}
+
+
+def save_entry(module_name, name, cache):
+    try:
+        module_cache = _cache[module_name]
+    except KeyError:
+        module_cache = _cache[module_name] = {}
+    module_cache[name] = cache
+
+
+def _create_get_from_cache(number):
+    def _get_from_cache(module_name, name, get_cache_values):
+        try:
+            return _cache[module_name][name][number]
+        except KeyError:
+            v = get_cache_values()
+            save_entry(module_name, name, v)
+            return v[number]
+    return _get_from_cache
+
+
+get_type = _create_get_from_cache(0)
+get_docstring_signature = _create_get_from_cache(1)
+get_docstring = _create_get_from_cache(2)
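The new module is a process-wide two-level dict mapping module name -> attribute name -> (type, docstring signature, docstring). Each getter takes a zero-argument callback that produces the whole triple, so a miss on any one field fills in all three at once. A small sketch of how the callers above use it; the triple returned by the callback is invented for the example:

    from jedi.api import completion_cache

    calls = []

    def compute_triple():
        # Stand-in for Completion._get_cache(); in jedi this does the real inference.
        calls.append(1)
        return ('function', 'zeros(shape, dtype=float)', 'Return a new array of given shape.')

    assert completion_cache.get_type('numpy', 'zeros', compute_triple) == 'function'
    assert completion_cache.get_docstring('numpy', 'zeros', compute_triple).startswith('Return')
    assert len(calls) == 1   # the second lookup was a cache hit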
@@ -446,7 +446,7 @@ def import_module(inference_state, import_names, parent_module_value, sys_path):
             from jedi.inference.value.namespace import ImplicitNamespaceValue
             module = ImplicitNamespaceValue(
                 inference_state,
-                fullname=file_io_or_ns.name,
+                string_names=tuple(file_io_or_ns.name.split('.')),
                 paths=file_io_or_ns.paths,
             )
         elif file_io_or_ns is None:
@@ -26,10 +26,10 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
     api_type = u'module'
     parent_context = None

-    def __init__(self, inference_state, fullname, paths):
+    def __init__(self, inference_state, string_names, paths):
         super(ImplicitNamespaceValue, self).__init__(inference_state, parent_context=None)
         self.inference_state = inference_state
-        self._fullname = fullname
+        self.string_names = string_names
         self._paths = paths

     def get_filters(self, origin_scope=None):
@@ -47,13 +47,13 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
     def py__package__(self):
         """Return the fullname
         """
-        return self._fullname.split('.')
+        return self.string_names

     def py__path__(self):
         return self._paths

     def py__name__(self):
-        return self._fullname
+        return '.'.join(self.string_names)

     def is_namespace(self):
         return True
@@ -68,4 +68,4 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
         return NamespaceContext(self)

     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, self._fullname)
+        return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
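The namespace refactor trades the single dotted `fullname` string for a tuple of name parts: `import_module` now builds the tuple by splitting on dots, and `py__name__()` joins it back, so the two representations stay interchangeable. A tiny sketch with an illustrative dotted name:

    string_names = tuple('namespace_pkg.nested'.split('.'))   # as built in import_module above
    assert string_names == ('namespace_pkg', 'nested')
    assert '.'.join(string_names) == 'namespace_pkg.nested'   # what py__name__() now returns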