
Add a generator cache for py__mro__

Dave Halter
2018-12-03 00:51:45 +01:00
parent a7c21eff4b
commit 3c3ad7b240
2 changed files with 62 additions and 26 deletions

jedi/evaluate/cache.py

@@ -4,7 +4,10 @@
 - ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
 """
 
+from jedi import debug
+
 _NO_DEFAULT = object()
+_RECURSION_SENTINEL = object()
 
 
 def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
@@ -28,8 +31,7 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a
             try:
                 memo = cache[function]
             except KeyError:
-                memo = {}
-                cache[function] = memo
+                cache[function] = memo = {}
 
             key = (obj, args, frozenset(kwargs.items()))
             if key in memo:
@@ -75,3 +77,47 @@ class CachedMetaClass(type):
     @evaluator_as_method_param_cache()
     def __call__(self, *args, **kwargs):
         return super(CachedMetaClass, self).__call__(*args, **kwargs)
+
+
+def evaluator_method_generator_cache():
+    """
+    This is a special memoizer. It memoizes generators and also checks for
+    recursion errors and returns no further iterator elements in that case.
+    """
+    def func(function):
+        def wrapper(obj, *args, **kwargs):
+            cache = obj.evaluator.memoize_cache
+            try:
+                memo = cache[function]
+            except KeyError:
+                cache[function] = memo = {}
+
+            key = (obj, args, frozenset(kwargs.items()))
+
+            if key in memo:
+                actual_generator, cached_lst = memo[key]
+            else:
+                actual_generator = function(obj, *args, **kwargs)
+                cached_lst = []
+                memo[key] = actual_generator, cached_lst
+
+            i = 0
+            while True:
+                try:
+                    next_element = cached_lst[i]
+                    if next_element is _RECURSION_SENTINEL:
+                        debug.warning('Found a generator recursion for %s' % obj)
+                        # This means we have hit a recursion.
+                        return
+                except IndexError:
+                    cached_lst.append(_RECURSION_SENTINEL)
+                    next_element = next(actual_generator, None)
+                    if next_element is None:
+                        cached_lst.pop()
+                        return
+                    cached_lst[-1] = next_element
+                yield next_element
+                i += 1
+
+        return wrapper
+    return func
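
For reference, here is a minimal self-contained sketch of the pattern this new decorator implements. The jedi plumbing is swapped out so the snippet runs on its own: a module-level dict stands in for obj.evaluator.memoize_cache, print() stands in for debug.warning(), and the decorator-factory form is flattened into a plain decorator. The names generator_cache, Thing and items are illustrative only, not part of jedi. It demonstrates the two properties the cache provides: the wrapped generator body runs at most once per (object, arguments) key while later iterations replay the cached elements, and an iteration that re-enters the same key while an element is still being computed hits the sentinel and stops instead of recursing forever.

_SENTINEL = object()
_cache = {}


def generator_cache(function):
    """Replay a generator method from a cache and cut off re-entrant calls."""
    def wrapper(obj, *args):
        key = (function, obj, args)
        try:
            actual_generator, cached_lst = _cache[key]
        except KeyError:
            actual_generator = function(obj, *args)
            cached_lst = []
            _cache[key] = actual_generator, cached_lst

        i = 0
        while True:
            try:
                next_element = cached_lst[i]
                if next_element is _SENTINEL:
                    # The element at position i is still being computed by an
                    # outer call, so this is a recursion; stop here.
                    print('recursion detected for %s' % obj)
                    return
            except IndexError:
                # Reserve the slot with the sentinel *before* advancing the
                # real generator, so a recursive reader can notice it.
                cached_lst.append(_SENTINEL)
                next_element = next(actual_generator, None)
                if next_element is None:
                    cached_lst.pop()
                    return
                cached_lst[-1] = next_element
            yield next_element
            i += 1
    return wrapper


class Thing(object):
    calls = 0

    @generator_cache
    def items(self):
        Thing.calls += 1
        yield 'first'
        # Computing the second element re-enters the same cached generator;
        # the sentinel cuts that off instead of looping forever.
        seen = list(self.items())
        yield 'second (saw %d cached element(s))' % len(seen)


t = Thing()
print(list(t.items()))   # recursion detected once, then both elements
print(list(t.items()))   # replayed from the cache; the body did not run again
print(Thing.calls)       # 1

The same caveat as in the real decorator applies: next(actual_generator, None) treats a yielded None as exhaustion, so the wrapped generators must never yield None as a value.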

jedi/evaluate/context/klass.py

@@ -40,7 +40,8 @@ py__doc__(include_call_signature: Returns the docstring for a context.
 from jedi import debug
 from jedi._compatibility import use_metaclass
 from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
+from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass, \
+    evaluator_method_generator_cache
 from jedi.evaluate import compiled
 from jedi.evaluate.lazy_context import LazyKnownContexts
 from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \
@@ -141,26 +142,13 @@ class ClassMixin(object):
             return list(context_.get_param_names())[1:]
         return []
 
-    @evaluator_method_cache(default=())
-    def py__mro__(context):
-        try:
-            # TODO is this really needed?
-            method = context.py__mro__
-        except AttributeError:
-            pass
-        else:
-            if not isinstance(context, ClassMixin):
-                # Currently only used for compiled objects.
-                return method()
-
-        def add(cls):
-            if cls not in mro:
-                mro.append(cls)
-
-        mro = [context]
+    @evaluator_method_generator_cache()
+    def py__mro__(self):
+        mro = [self]
+        yield self
         # TODO Do a proper mro resolution. Currently we are just listing
         # classes. However, it's a complicated algorithm.
-        for lazy_cls in context.py__bases__():
+        for lazy_cls in self.py__bases__():
             # TODO there's multiple different mro paths possible if this yields
             # multiple possibilities. Could be changed to be more correct.
             for cls in lazy_cls.infer():
@@ -180,12 +168,14 @@ class ClassMixin(object):
                       File "<stdin>", line 1, in <module>
                     TypeError: int() takes at most 2 arguments (3 given)
                     """
-                    debug.warning('Super class of %s is not a class: %s', context, cls)
+                    debug.warning('Super class of %s is not a class: %s', self, cls)
                 else:
-                    add(cls)
-                    for cls_new in cls.py__mro__():
-                        add(cls_new)
-        return tuple(mro)
+                    if cls not in mro:
+                        mro.append(cls)
+                        yield cls
+                        for cls_new in cls.py__mro__():
+                            mro.append(cls_new)
+                            yield cls_new
 
     def _create_class_filter(self, cls, origin_scope, is_instance):
         return ClassFilter(
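
The MRO walk changed here is what lets jedi see attributes that are only defined on base classes, so the touched code path can be exercised end to end through the public API. Below is a small sketch, assuming the jedi.Script(source, line, column) constructor and the completions() method of the 0.13-era releases this commit belongs to; later releases changed that interface. The completion result itself is not what the commit changes: the difference is internal, in that the MRO is now produced lazily through the generator cache and a py__mro__ computation that re-enters itself is cut short by the sentinel instead of recursing.

import jedi

source = '''\
class Base:
    def base_method(self):
        pass

class Child(Base):
    def child_method(self):
        pass

Child().
'''

# Ask for completions right after "Child()." on line 9 of the source.
script = jedi.Script(source, line=9, column=len('Child().'))
names = [c.name for c in script.completions()]

# The inherited method shows up next to the class's own one, because
# py__mro__ yields Child and then Base (and object, hence the dunders).
print('base_method' in names, 'child_method' in names)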