Remove the old star import cache, because it's not even used.
@@ -133,7 +133,6 @@ class Script(object):
 
     @cache.memoize_method
     def _get_module_node(self):
-        cache.invalidate_star_import_cache(self._path)
         parser = FastParser(self._grammar, self._source, self.path)
         save_parser(self.path, parser, pickling=False)
 
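Note: the removed call was the only thing this memoized method did with the star import cache; after the hunk it just parses and stores the parser. A sketch of the resulting method, reconstructed from the context lines (the hunk ends before the return, so the final line is an assumption, not part of the diff):

    @cache.memoize_method
    def _get_module_node(self):
        parser = FastParser(self._grammar, self._source, self.path)
        save_parser(self.path, parser, pickling=False)
        return parser.module  # assumed continuation; not shown in the hunk
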
@@ -90,31 +90,3 @@ def memoize_method(method):
             dct[key] = result
             return result
     return wrapper
-
-
-def _invalidate_star_import_cache_module(module, only_main=False):
-    """ Important if some new modules are being reparsed """
-    try:
-        t, modules = _time_caches['star_import_cache_validity'][module]
-    except KeyError:
-        pass
-    else:
-        del _time_caches['star_import_cache_validity'][module]
-
-    # This stuff was part of load_parser. However since we're most likely
-    # not going to use star import caching anymore, just ignore it.
-    #else:
-        # In case there is already a module cached and this module
-        # has to be reparsed, we also need to invalidate the import
-        # caches.
-        # _invalidate_star_import_cache_module(parser_cache_item.parser.module)
-
-
-def invalidate_star_import_cache(path):
-    """On success returns True."""
-    try:
-        parser_cache_item = parser_cache[path]
-    except KeyError:
-        pass
-    else:
-        _invalidate_star_import_cache_module(parser_cache_item.parser.module)
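Note: both deleted helpers read `_time_caches['star_import_cache_validity']`, a per-setting bucket mapping a key to an `(expiry, value)` pair, as the `t, modules = ...` unpacking suggests. A minimal self-contained sketch of that time-validity pattern, with hypothetical `store`/`lookup` helpers (the bucket layout is inferred from the deleted lines; jedi's real code differs in detail):

    import time

    # setting name -> {key: (expiry_timestamp, cached_value)}
    _time_caches = {'star_import_cache_validity': {}}

    def store(key, value, validity=60.0):
        # 60.0 matches the star_import_cache_validity default removed below.
        _time_caches['star_import_cache_validity'][key] = (time.time() + validity, value)

    def lookup(key):
        bucket = _time_caches['star_import_cache_validity']
        try:
            expiry, value = bucket[key]
        except KeyError:
            return None
        if expiry < time.time():
            del bucket[key]  # stale entry: same effect as the deleted invalidation
            return None
        return value
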
@@ -196,8 +196,8 @@ class ParserWithRecovery(Parser):
 
     def parse(self, tokenizer):
         root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokenizer))
+        root_node.path = self._module_path
         self.module = root_node
-        self.module.path = self._module_path
         return root_node
 
     def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,
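Note: this hunk is a pure reordering, not a behavior change. Since `self.module = root_node` binds both names to the same object, setting `.path` through either name is equivalent. A tiny demonstration of the aliasing (the `Node` class and path are illustrative):

    class Node:
        pass

    root_node = Node()
    root_node.path = '/tmp/mod.py'   # new order: set on root_node first
    module = root_node               # alias; both names point at one object
    assert module.path == '/tmp/mod.py'
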
@@ -68,7 +68,6 @@ definitely worse in some cases. But a completion should also be fast.
 Caching
 ~~~~~~~
 
-.. autodata:: star_import_cache_validity
 .. autodata:: call_signatures_validity
 
 
@@ -217,13 +216,6 @@ scale `max_executions` and `max_until_execution_unique`:
 # caching validity (time)
 # ----------------
 
-star_import_cache_validity = 60.0
-"""
-In huge packages like numpy, checking all star imports on every completion
-might be slow, therefore we do a star import caching, that lasts a certain
-time span (in seconds).
-"""
-
 call_signatures_validity = 3.0
 """
 Finding function calls might be slow (0.1-0.5s). This is not acceptible for
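Note: `call_signatures_validity` survives the commit as the only time-validity setting left in this section. A generic sketch of how such a validity window can gate an expensive lookup (`validity_cache` and `find_signatures` are illustrative names, not jedi's actual decorator or API):

    import time

    def validity_cache(seconds):
        # Generic time-window memoizer; jedi's own implementation differs.
        def decorator(func):
            cache = {}
            def wrapper(*args):
                now = time.time()
                if args in cache:
                    expiry, value = cache[args]
                    if expiry > now:
                        return value  # still within the validity window
                value = func(*args)
                cache[args] = (now + seconds, value)
                return value
            return wrapper
        return decorator

    @validity_cache(3.0)  # same window as call_signatures_validity above
    def find_signatures(source):
        # Stand-in for the slow (0.1-0.5s) signature search described above.
        return [line for line in source.splitlines() if '(' in line]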