forked from VimPlug/jedi
add get_in_function_call caching (3s for now)
@@ -40,6 +40,7 @@ import helpers
 import common
 import builtin
 import api_classes
+import cache
 
 from _compatibility import next, unicode
 
@@ -403,7 +404,8 @@ class Script(object):
 
         debug.speed('func_call user_stmt')
         with common.scale_speed_settings(settings.scale_get_in_function_call):
-            origins = evaluate.follow_call(call)
+            _callable = lambda: evaluate.follow_call(call)
+            origins = cache.cache_get_in_function_call(_callable, user_stmt)
         debug.speed('func_call followed')
 
         if len(origins) == 0:
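The call-site change above defers the expensive evaluate.follow_call(call) behind a lambda, so the cache layer decides whether it runs at all. A minimal, generic sketch of that defer-and-expire pattern, independent of jedi (the get_or_compute helper, the key tuple and the 3-second window are illustrative only, not part of the commit):

import time

_store = {}  # key -> (expiry, value)

def get_or_compute(compute, key, validity=3.0):
    # Run compute() only when no unexpired value is cached for key.
    hit = _store.get(key)
    if hit is not None and hit[0] > time.time():
        return hit[1]
    value = compute()  # the deferred, expensive part
    _store[key] = (time.time() + validity, value)
    return value

# The second call within the validity window never executes the lambda.
print(get_or_compute(lambda: sum(x * x for x in range(10 ** 6)), key=('mod.py', (3, 7))))
print(get_or_compute(lambda: sum(x * x for x in range(10 ** 6)), key=('mod.py', (3, 7))))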
@@ -1,6 +1,11 @@
+import time
+import settings
+
 # memoize caches will be deleted after every action
 memoize_caches = []
+
+time_caches = []
 
 
 def clear_caches():
     """ Jedi caches many things, that should be completed after each completion
@@ -13,6 +18,8 @@ def clear_caches():
     for m in memoize_caches:
         m.clear()
 
+    time_caches = []
+
 
 def memoize_default(default=None, cache=memoize_caches):
     """ This is a typical memoization decorator, BUT there is one difference:
@@ -47,3 +54,34 @@ class CachedMetaClass(type):
     @memoize_default()
     def __call__(self, *args, **kwargs):
         return super(CachedMetaClass, self).__call__(*args, **kwargs)
+
+
+def time_cache(time_add_setting):
+    """ This decorator works as follows: Call it with a setting and after that
+    use the function with a callable that returns the key.
+    But: This function is only called if the key is not available. After a
+    certain amount of time (`time_add_setting`) the cache is invalid.
+    """
+    def _temp(key_func):
+        dct = {}
+        time_caches.append(dct)
+        def wrapper(optional_callable, *args, **kwargs):
+            key = key_func(*args, **kwargs)
+            value = None
+            if key in dct:
+                expiry, value = dct[key]
+                if expiry > time.time():
+                    return value
+            value = optional_callable()
+            time_add = getattr(settings, time_add_setting)
+            if key is not None:
+                dct[key] = time.time() + time_add, value
+            return value
+        return wrapper
+    return _temp
+
+
+@time_cache("get_in_function_call_validity")
+def cache_get_in_function_call(stmt):
+    module_path = stmt.get_parent_until().path
+    return None if module_path is None else (module_path, stmt.start_pos)
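The decorator's calling convention is easy to misread from the diff alone: the decorated function only maps its arguments to a cache key, while the actual work is passed in as the first argument of every call and runs only on a miss or after `time_add_setting` seconds. A self-contained sketch of that convention, mirroring the shape of the decorator above (the settings stand-in, demo_validity, and cached_lookup are invented for the example, not part of the commit):

import time

class settings:  # stand-in for jedi's settings module, only for this sketch
    demo_validity = 2.0

time_caches = []


def time_cache(time_add_setting):
    # Same shape as the decorator added above.
    def _temp(key_func):
        dct = {}
        time_caches.append(dct)

        def wrapper(optional_callable, *args, **kwargs):
            key = key_func(*args, **kwargs)
            if key in dct:
                expiry, value = dct[key]
                if expiry > time.time():
                    return value
            value = optional_callable()
            if key is not None:
                dct[key] = time.time() + getattr(settings, time_add_setting), value
            return value
        return wrapper
    return _temp


@time_cache("demo_validity")
def cached_lookup(name):
    # Maps the call arguments to a cache key; the expensive part is the callable.
    return ('demo', name)


print(cached_lookup(lambda: len('expensive computation'), 'foo'))  # computed: 21
print(cached_lookup(lambda: 0, 'foo'))  # served from cache within 2 seconds: 21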
@@ -30,7 +30,7 @@ fast_parser_always_reparse = False
 # Use the cache (full cache) to generate get_in_function_call's. This may fail
 # with multiline docstrings (likely) and other complicated changes to the fail
 # (unlikely). The goal is to move away from it by making the rest faster.
-use_get_in_function_call_cache = True
+use_get_in_function_call_cache = False
 
 # ----------------
 # dynamic stuff
@@ -94,7 +94,7 @@ scale_get_in_function_call = 0.1
 part_line_length = 20
 
 # ----------------
-# star import caching
+# caching validity (time)
 # ----------------
 
 # In huge packages like numpy, checking all star imports on every completion
@@ -102,3 +102,7 @@ part_line_length = 20
 # time span (in seconds).
 
 star_import_cache_validity = 60.0
+
+# Finding function calls might be slow (0.1-0.5s). This is not acceptible for
+# normal writing. Therefore cache it for a short time.
+get_in_function_call_validity = 3.0