forked from VimPlug/jedi
More fixes to *args type inference.
@@ -1,3 +1,4 @@
+from jedi.common import unite

 class Context(object):
     type = None  # TODO remove
@@ -35,3 +36,56 @@ class FlowContext(TreeContext):
     def get_parent_flow_context(self):
         if 1:
             return self.parent_context
+
+
+class AbstractLazyContext(object):
+    def __init__(self, data):
+        self._data = data
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self._data)
+
+    def infer(self):
+        raise NotImplementedError
+
+
+class LazyKnownContext(AbstractLazyContext):
+    """data is a context."""
+    def infer(self):
+        return set([self._data])
+
+
+class LazyKnownContexts(AbstractLazyContext):
+    """data is a set of contexts."""
+    def infer(self):
+        return self._data
+
+
+class LazyUnknownContext(AbstractLazyContext):
+    def __init__(self):
+        super(LazyUnknownContext, self).__init__(None)
+
+    def infer(self):
+        return set()
+
+
+class LazyTreeContext(AbstractLazyContext):
+    def __init__(self, context, node):
+        self._context = context
+        self._data = node
+
+    def infer(self):
+        return self._context.eval_node(self._data)
+
+
+def get_merged_lazy_context(lazy_contexts):
+    if len(lazy_contexts) > 1:
+        return MergedLazyContexts(lazy_contexts)
+    else:
+        return lazy_contexts[0]
+
+
+class MergedLazyContexts(AbstractLazyContext):
+    """data is a list of lazy contexts."""
+    def infer(self):
+        return unite(l.infer() for l in self._data)
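
For orientation, here is a minimal standalone sketch of the pattern these new classes implement (plain Python, illustrative names only, not jedi APIs): a lazy context wraps either an already-known value or an unevaluated tree node, .infer() always answers with a set of contexts, and merging only adds a wrapper when there is more than one slot.

# Toy stand-ins for the lazy contexts above; every name here is illustrative.
class LazyKnown(object):
    def __init__(self, value):
        self._value = value

    def infer(self):
        # The value is already evaluated; just wrap it in a set.
        return set([self._value])


class LazyTree(object):
    def __init__(self, evaluate, node):
        self._evaluate = evaluate   # callable standing in for context.eval_node
        self._node = node

    def infer(self):
        # Evaluation is deferred until somebody actually asks for the types.
        return self._evaluate(self._node)


class LazyMerged(object):
    def __init__(self, lazy_contexts):
        self._lazy_contexts = lazy_contexts

    def infer(self):
        return set().union(*(l.infer() for l in self._lazy_contexts))


def merge(lazy_contexts):
    # Mirrors get_merged_lazy_context: skip the wrapper for the common single-slot case.
    return LazyMerged(lazy_contexts) if len(lazy_contexts) > 1 else lazy_contexts[0]


known = LazyKnown('int')
deferred = LazyTree(lambda node: set(['str']), node='<parse node>')
print(merge([known, deferred]).infer())   # {'int', 'str'}
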
@@ -32,16 +32,18 @@ from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi import common
from jedi.evaluate.filters import DictFilter
-from jedi.evaluate.context import Context
+from jedi.evaluate import context


-class AbstractSequence(Context):
+class AbstractSequence(context.Context):
+    _array_type = None
+
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        raise NotImplementedError

    @property
    def name(self):
-        return compiled.CompiledContextName(self, self.type)
+        return compiled.CompiledContextName(self, self._array_type)


class IterableWrapper(tree.Base):
@@ -135,7 +137,7 @@ class GeneratorMixin(object):
        return gen_obj.py__class__()


-class Generator(Context, GeneratorMixin):
+class Generator(context.Context, GeneratorMixin):
    """Handling of `yield` functions."""

    def __init__(self, evaluator, func_execution_context):
@@ -145,17 +147,8 @@ class Generator(Context, GeneratorMixin):
    def py__iter__(self):
        return self._func_execution_context.get_yield_values()

-    def __getattr__(self, name):
-        raise NotImplementedError
-        if name not in ['start_pos', 'end_pos', 'parent', 'get_imports',
-                        'doc', 'docstr', 'get_parent_until',
-                        'get_code', 'subscopes']:
-            raise AttributeError("Accessing %s of %s is not allowed."
-                                 % (self, name))
-        return getattr(self.func, name)
-
    def __repr__(self):
-        return "<%s of %s>" % (type(self).__name__, self.func)
+        return "<%s of %s>" % (type(self).__name__, self._func_execution_context)


class Comprehension(IterableWrapper):
@@ -235,6 +228,7 @@ class Comprehension(IterableWrapper):
            yield result

    def py__iter__(self):
+        raise NotImplementedError
        return self._iterate()

    def __repr__(self):
@@ -319,6 +313,7 @@ class DictComprehension(Comprehension, ArrayMixin):
        return self._get_comprehension().children[3]

    def py__iter__(self):
+        raise NotImplementedError
        for keys, values in self._iterate():
            yield keys
@@ -351,9 +346,10 @@ class ArrayLiteralContext(AbstractSequence, ArrayMixin):
               '[': 'list',
               '{': 'dict'}

-    def __init__(self, evaluator, parent_context, atom):
-        super(ArrayLiteralContext, self).__init__(evaluator, parent_context)
+    def __init__(self, evaluator, defining_context, atom):
+        super(ArrayLiteralContext, self).__init__(evaluator, evaluator.BUILTINS)
        self.atom = atom
+        self._defining_context = defining_context

        if self.atom.type in ('testlist_star_expr', 'testlist'):
            self._array_type = 'tuple'
@@ -381,7 +377,7 @@ class ArrayLiteralContext(AbstractSequence, ArrayMixin):
        if isinstance(index, slice):
            return set([self])
        else:
-            return self.parent_context.eval_node(self._items()[index])
+            return self._defining_context.eval_node(self._items()[index])

    # @memoize_default()
    def py__iter__(self):
@@ -399,8 +395,8 @@ class ArrayLiteralContext(AbstractSequence, ArrayMixin):
                for _ in types:
                    yield types
        else:
-            for value in self._items():
-                yield self.parent_context.eval_node(value)
+            for node in self._items():
+                yield context.LazyTreeContext(self._defining_context, node)

        additions = check_array_additions(self._evaluator, self)
        if additions:
@@ -520,6 +516,7 @@ class FakeDict(_FakeArray):
        self._dct = dct

    def py__iter__(self):
+        raise NotImplementedError
        yield set(compiled.create(self._evaluator, key) for key in self._dct)

    def py__getitem__(self, index):
@@ -537,6 +534,7 @@ class MergedArray(_FakeArray):
        self._arrays = arrays

    def py__iter__(self):
+        raise NotImplementedError
        for array in self._arrays:
            for types in array.py__iter__():
                yield types
@@ -603,11 +601,9 @@ def py__iter__(evaluator, types, node=None):
                             message="TypeError: '%s' object is not iterable" % typ)
        else:
            type_iters.append(iter_method())
-            #for result in iter_method():
-                #yield result

-    for t in zip_longest(*type_iters, fillvalue=set()):
-        yield unite(t)
+    for lazy_contexts in zip_longest(*type_iters, fillvalue=set()):
+        yield context.get_merged_lazy_context(lazy_contexts)


def py__iter__types(evaluator, types, node=None):
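
A toy version of what the rewritten loop does (standard library only, no jedi types; the two lists stand in for the per-sequence iterators): zip_longest lines the yielded slots up by position, and each position collapses into one merged result.

from itertools import zip_longest   # izip_longest on Python 2

# Each list stands in for one iterated sequence; each element is a set of types.
first = [{'int'}, {'str'}]
second = [{'float'}]

# fillvalue=set() pads the shorter sequence, as in py__iter__ above.
for position, group in enumerate(zip_longest(first, second, fillvalue=set())):
    merged = set().union(*group)
    print(position, merged)   # 0 {'int', 'float'}   then   1 {'str'}
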
@@ -615,7 +611,7 @@ def py__iter__types(evaluator, types, node=None):
    Calls `py__iter__`, but ignores the ordering in the end and just returns
    all types that it contains.
    """
-    return unite(py__iter__(evaluator, types, node))
+    return unite(lazy_context.infer() for lazy_context in py__iter__(evaluator, types, node))


def py__getitem__(evaluator, context, types, trailer):
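
Put differently, the ordered lazy slots can still be flattened into one unordered set of types by inferring every slot; a small illustration with plain callables standing in for lazy contexts:

# Each lazy slot is represented by a callable that returns a set of types.
lazy_slots = [lambda: {'int'}, lambda: {'int', 'str'}]

def flatten(slots):
    # Same idea as the new py__iter__types: union over the inferred result of every slot.
    result = set()
    for slot in slots:
        result |= slot()
    return result

print(flatten(lazy_slots))   # {'int', 'str'}
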
@@ -820,6 +816,7 @@ class _ArrayInstance(IterableWrapper):
        self.var_args = instance.var_args

    def py__iter__(self):
+        raise NotImplementedError
        try:
            _, first_nodes = next(self.var_args.unpack())
        except StopIteration:
@@ -8,6 +8,7 @@ from jedi.parser import tree
from jedi.evaluate import iterable
from jedi.evaluate import analysis
from jedi.evaluate import precedence
+from jedi.evaluate import context


def try_iter_content(types, depth=0):
@@ -134,7 +135,7 @@ class TreeArguments(AbstractArguments):
                             for a in arrays]
                iterators = list(iterators)
                for values in list(zip_longest(*iterators)):
-                    yield None, MergedLazyContexts(values)
+                    yield None, context.get_merged_lazy_context(values)
            elif stars == 2:
                arrays = self._evaluator.eval_element(self._context, el)
                dicts = [_star_star_dict(self._evaluator, a, el, func)
@@ -146,14 +147,14 @@ class TreeArguments(AbstractArguments):
            if tree.is_node(el, 'argument'):
                c = el.children
                if len(c) == 3: # Keyword argument.
-                    named_args.append((c[0].value, LazyContext(self._context, c[2]),))
+                    named_args.append((c[0].value, context.LazyTreeContext(self._context, c[2]),))
                else: # Generator comprehension.
                    # Include the brackets with the parent.
                    comp = iterable.GeneratorComprehension(
                        self._evaluator, self.argument_node.parent)
-                    yield None, KnownContext(comp)
+                    yield None, context.LazyKnownContext(comp)
            else:
-                yield None, LazyContext(self._context, el)
+                yield None, context.LazyTreeContext(self._context, el)

        # Reordering var_args is necessary, because star args sometimes appear
        # after named argument, but in the actual order it's prepended.
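
The unpack() protocol that the rest of this file consumes can be pictured as follows; this is a standalone sketch with invented names, not jedi's classes. Every argument arrives as a (keyword-or-None, lazy slot) pair, so nothing gets inferred until a parameter actually binds it.

class Lazy(object):
    def __init__(self, thunk):
        self._thunk = thunk

    def infer(self):
        return self._thunk()


def unpack(positional, keyword):
    # Positional arguments carry no key; keyword arguments carry their name.
    for value in positional:
        yield None, Lazy(lambda v=value: {v})
    for name, value in sorted(keyword.items()):
        yield name, Lazy(lambda v=value: {v})


for key, slot in unpack(['int'], {'flag': 'bool'}):
    print(key, slot.infer())   # None {'int'}   then   flag {'bool'}
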
@@ -176,51 +177,13 @@ class TreeArguments(AbstractArguments):
        return _get_calling_var_args(self._evaluator, self)


-class KnownContext(object):
-    def __init__(self, value):
-        self._value = value
-
-    def infer(self):
-        return set([self._value])
-
-
-class KnownContexts(object):
-    def __init__(self, values):
-        self._values = values
-
-    def infer(self):
-        return self._values
-
-
-class UnknownContext(object):
-    def infer(self):
-        return set()
-
-
-class LazyContext(object):
-    def __init__(self, context, node):
-        self._context = context
-        self._node = node
-
-    def infer(self):
-        return self._context.eval_node(self._node)
-
-
-class MergedLazyContexts(object):
-    def __init__(self, lazy_contexts):
-        self._lazy_contexts = lazy_contexts
-
-    def infer(self):
-        return common.unite(l.infer() for l in self._lazy_contexts)
-
-
class ValueArguments(AbstractArguments):
    def __init__(self, value_list):
        self._value_list = value_list

    def unpack(self, func=None):
        for value in self._value_list:
-            yield None, KnownContext(value)
+            yield None, context.LazyKnownContext(value)

    def get_calling_var_args(self):
        return None
@@ -295,7 +258,10 @@ def get_params(evaluator, parent_context, func, var_args):
        # args / kwargs will just be empty arrays / dicts, respectively.
        # Wrong value count is just ignored. If you try to test cases that are
        # not allowed in Python, Jedi will maybe not show any completions.
-        default = None if param.default is None else LazyContext(parent_context, param.default)
+        default = None
+        if param.default is not None:
+            default = context.LazyTreeContext(parent_context, param.default)
+
        key, argument = next(var_arg_iterator, (None, default))
        while key is not None:
            keys_only = True
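
Why the default is wrapped instead of evaluated up front, sketched in plain Python (the names are invented for illustration): the default expression is only inferred when no argument was supplied for that parameter.

class LazyDefault(object):
    def __init__(self, thunk):
        self._thunk = thunk

    def infer(self):
        return self._thunk()


def bind(supplied_slot, default_thunk):
    # Mirror the pattern above: wrap the default lazily, prefer the supplied slot.
    default = None if default_thunk is None else LazyDefault(default_thunk)
    slot = supplied_slot if supplied_slot is not None else default
    return set() if slot is None else slot.infer()

print(bind(None, lambda: {'int'}))                          # default inferred on demand
print(bind(LazyDefault(lambda: {'str'}), lambda: {'int'}))  # supplied argument wins
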
@@ -335,17 +301,17 @@ def get_params(evaluator, parent_context, func, var_args):
                        break
                    values_list.append([argument])
            seq = iterable.FakeSequence(evaluator, 'tuple', values_list)
-            result_arg = KnownContext(seq)
+            result_arg = context.LazyKnownContext(seq)
        elif param.stars == 2:
            # **kwargs param
            dct = iterable.FakeDict(evaluator, dict(non_matching_keys))
-            result_arg = KnownContext(dct)
+            result_arg = context.LazyKnownContext(dct)
            non_matching_keys = {}
        else:
            # normal param
            if argument is None:
                # No value: Return an empty container
-                result_arg = UnknownContext()
+                result_arg = context.LazyUnknownContext()
                if not keys_only:
                    calling_va = var_args.get_calling_var_args()
                    if calling_va is not None:
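
The *args branch above packs whatever positional slots are left into a single tuple-like value and hands that over as one known lazy slot. A toy version of that packing, with no jedi types involved:

def pack_star_args(remaining_values):
    # Collect the leftover positional values into one tuple-like value,
    # roughly what FakeSequence plus LazyKnownContext do for a *args parameter.
    packed = ('tuple', tuple(remaining_values))
    return lambda: {packed}   # a lazy slot: calling it "infers" a one-element set

star_args = pack_star_args(['int', 'str'])
print(star_args())   # {('tuple', ('int', 'str'))}
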
@@ -366,8 +332,8 @@ def get_params(evaluator, parent_context, func, var_args):
    # there's nothing to find for certain names.
    for k in set(param_dict) - set(keys_used):
        param = param_dict[k]
-        result_arg = (UnknownContext() if param.default is None else
-                      LazyContext(parent_context, param.default))
+        result_arg = (context.LazyUnknownContext() if param.default is None else
+                      context.LazyTreeContext(parent_context, param.default))
        result_params.append(ExecutedParam(param, var_args, result_arg))

    if not (non_matching_keys or had_multiple_value_error or
@@ -412,22 +378,17 @@ def get_params(evaluator, parent_context, func, var_args):

def _iterate_star_args(evaluator, array, input_node, func=None):
    from jedi.evaluate.representation import Instance
    if isinstance(array, iterable.AbstractSequence):
        raise DeprecationWarning('_items? seriously?')
        # TODO ._items is not the call we want here. Replace in the future.
        for node in array._items():
            yield node
    elif isinstance(array, iterable.Generator):
        for types in array.py__iter__():
            yield KnownContexts(types)
    elif isinstance(array, Instance) and array.name.get_code() == 'tuple':
        debug.warning('Ignored a tuple *args input %s' % array)
    else:
        try:
            iter_ = array.py__iter__
        except AttributeError:
            if func is not None:
                # TODO this func should not be needed.
                m = "TypeError: %s() argument after * must be a sequence, not %s" \
                    % (func.name.value, array)
                analysis.add(evaluator, 'type-error-star', input_node, message=m)
        else:
            for lazy_context in iter_():
                yield lazy_context


def _star_star_dict(evaluator, array, input_node, func):
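
A self-contained sketch of the job this helper does (no jedi imports; SequenceLike and lazy_slots are invented stand-ins): given the value passed after *, produce one lazy slot per element, or report a type error when the value cannot be iterated.

def iterate_star_args(value, report_error):
    # The value is expected to expose an iteration hook; anything else is
    # reported, mirroring the 'type-error-star' analysis call above.
    try:
        slots = value.lazy_slots
    except AttributeError:
        report_error("argument after * must be a sequence, not %r" % (value,))
        return
    for slot in slots():
        yield slot


class SequenceLike(object):
    def __init__(self, per_element_types):
        self._per_element_types = per_element_types

    def lazy_slots(self):
        # One deferred slot per element, each producing a set of types.
        return [lambda types=types: types for types in self._per_element_types]


seq = SequenceLike([{'int'}, {'str'}])
print([slot() for slot in iterate_star_args(seq, print)])   # [{'int'}, {'str'}]
print(list(iterate_star_args(42, print)))                   # prints the error, then []
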
@@ -117,12 +117,12 @@ def is_literal(obj):

def _is_tuple(obj):
    from jedi.evaluate import iterable
-    return isinstance(obj, iterable.Array) and obj.type == 'tuple'
+    return isinstance(obj, iterable.AbstractSequence) and obj.type == 'tuple'


def _is_list(obj):
    from jedi.evaluate import iterable
-    return isinstance(obj, iterable.Array) and obj.type == 'list'
+    return isinstance(obj, iterable.AbstractSequence) and obj.type == 'list'


def _element_calculate(evaluator, left, operator, right):
@@ -57,11 +57,11 @@ from jedi.evaluate import flow_analysis
from jedi.evaluate import imports
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
    GlobalNameFilter, DictFilter, ContextName
-from jedi.evaluate.context import TreeContext
+from jedi.evaluate import context
from jedi.evaluate.instance import TreeInstance


-class Executed(TreeContext):
+class Executed(context.TreeContext):
    """
    An instance is also an executable - because __init__ is called
    :param var_args: The param input array, consist of a parser node or a list.
@@ -412,7 +412,7 @@ class Wrapper(tree.Base):
        return ContextName(self, name)


-class ClassContext(use_metaclass(CachedMetaClass, TreeContext, Wrapper)):
+class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext, Wrapper)):
    """
    This class is not only important to extend `tree.Class`, it is also a
    important for descriptors (if the descriptor methods are evaluated or not).
@@ -518,7 +518,7 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext, Wrapper)):
        return "<e%s of %s>" % (type(self).__name__, self.base)


-class FunctionContext(use_metaclass(CachedMetaClass, TreeContext, Wrapper)):
+class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext, Wrapper)):
    """
    Needed because of decorators. Decorators are evaluated here.
    """
@@ -689,18 +689,19 @@ class FunctionExecutionContext(Executed):
            types |= set(pep0484.find_return_types(self._evaluator, funcdef))

        for r in returns:
            types |= self.eval_node(r.children[1])
            check = flow_analysis.reachability_check(self, funcdef, r)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                if check_yields:
-                    types |= iterable.unite(self._eval_yield(r))
+                    types |= set(self._eval_yield(r))
                else:
                    types |= self.eval_node(r.children[1])
                if check is flow_analysis.REACHABLE:
                    debug.dbg('Return reachable: %s', r)
                    break
        if check_yields:
            return context.get_merged_lazy_context(list(types))
        return types

    def _eval_yield(self, yield_expr):
@@ -708,10 +709,10 @@ class FunctionExecutionContext(Executed):
        if node.type == 'yield_arg':
            # It must be a yield from.
            yield_from_types = self.eval_node(node)
-            for result in iterable.py__iter__(self._evaluator, yield_from_types, node):
-                yield result
+            for lazy_context in iterable.py__iter__(self._evaluator, yield_from_types, node):
+                yield lazy_context
        else:
-            yield self.eval_node(node)
+            yield context.LazyTreeContext(self, node)

    @recursion.execution_recursion_decorator
    def get_yield_values(self):
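
The same deferral applied to yields, as a standalone sketch (the generator below is a stand-in, not jedi code): a plain yield becomes a lazy node, while a "yield from" forwards the lazy slots of the iterated value rather than its eagerly computed types.

class LazyNode(object):
    def __init__(self, evaluate, node):
        self._evaluate, self._node = evaluate, node

    def infer(self):
        return self._evaluate(self._node)


def eval_yields(yield_nodes, evaluate, iter_lazy_slots):
    # Roughly mirrors _eval_yield: plain yields become lazy nodes,
    # a 'yield from' forwards the lazy slots of the iterated object.
    for kind, payload in yield_nodes:
        if kind == 'yield_from':
            for slot in iter_lazy_slots(payload):
                yield slot
        else:
            yield LazyNode(evaluate, payload)


slots = eval_yields(
    [('yield', 'a'), ('yield_from', 'seq')],
    evaluate=lambda node: {node.upper()},
    iter_lazy_slots=lambda name: [LazyNode(lambda n: {'int'}, name)],
)
print([slot.infer() for slot in slots])   # [{'A'}, {'int'}]
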
@@ -784,7 +785,7 @@ class GlobalName(helpers.FakeName):
                            name.start_pos, is_definition=True)


-class ModuleContext(use_metaclass(CachedMetaClass, TreeContext, Wrapper)):
+class ModuleContext(use_metaclass(CachedMetaClass, context.TreeContext, Wrapper)):
    parent_context = None

    def __init__(self, evaluator, module, parent_module=None):