forked from VimPlug/jedi

Fixes to decorators and *arg functions.

This commit is contained in:
Dave Halter
2016-10-31 09:19:58 +01:00
parent c537d360f3
commit 6d8ff9293d
7 changed files with 97 additions and 128 deletions
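
For orientation, the snippet below (illustrative only, not part of the diff) shows the kind of user code the touched inference paths have to handle: a decorated function and a call that forwards its positional arguments through *args.

def deco(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper


@deco
def greet(name):
    return 'Hello ' + name


def forward(*args):
    # Completion on the return value requires looking through both the
    # decorator above and this *args forwarding.
    return greet(*args)


greeting = forward('world')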

View File

@@ -410,9 +410,6 @@ class Evaluator(object):
         if self.is_analysis:
             arguments.eval_all()
 
-        if isinstance(obj, er.FunctionContext):
-            obj = obj.get_decorated_func()
-
         debug.dbg('execute: %s %s', obj, arguments)
         try:
             # Some stdlib functions like super(), namedtuple(), etc. have been
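
The removed branch meant Evaluator.execute resolved decorators itself at call time. After this commit, whatever reaches execute is assumed to already be the decorated result, because decoration is applied during name resolution (see _apply_decorators below). A minimal sketch of that shifted responsibility, using invented names rather than jedi's real API:

# Invented names; only the shape of the call matters here.
def execute(contexts, arguments):
    results = set()
    for ctx in contexts:
        # No get_decorated_func() step any more: name resolution already
        # replaced decorated functions with the contexts their decorators
        # produce, so execution just runs what it is given.
        results |= ctx.execute(arguments)
    return results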

View File

@@ -23,8 +23,8 @@ class Context(object):
""" """
Execute a function with already executed arguments. Execute a function with already executed arguments.
""" """
from jedi.evaluate.param import ValueArguments from jedi.evaluate.param import ValuesArguments
return self.execute(ValueArguments(value_list)) return self.execute(ValuesArguments(value_list))
def eval_node(self, node): def eval_node(self, node):
return self._evaluator.eval_element(self, node) return self._evaluator.eval_element(self, node)
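
The rename from ValueArguments to ValuesArguments reflects that each argument slot now carries a set of possible contexts rather than a single value. A rough stand-in sketch of that interface (simplified classes, not jedi's actual implementation):

# Simplified stand-ins to show the shape of the API: every positional
# argument is a set of possible contexts, unpacked lazily.
class LazyKnownContexts(object):
    def __init__(self, contexts):
        self._contexts = contexts

    def infer(self):
        return self._contexts


class ValuesArguments(object):
    def __init__(self, values_list):
        # One set of contexts per positional argument.
        self._values_list = values_list

    def unpack(self, func=None):
        for values in self._values_list:
            yield None, LazyKnownContexts(values)  # None means "no keyword"


args = ValuesArguments([{'int_ctx'}, {'str_ctx', 'bytes_ctx'}])
for key, lazy in args.unpack():
    print(key, sorted(lazy.infer()))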

View File

@@ -351,50 +351,81 @@ def _get_global_stmt_scopes(evaluator, global_stmt, name):
 @memoize_default(set(), evaluator_is_first_arg=True)
 def _name_to_types(evaluator, context, name, scope):
     types = []
-    typ = name.get_definition()
-    if typ.isinstance(tree.ForStmt):
-        types = pep0484.find_type_from_comment_hint_for(evaluator, typ, name)
+    node = name.get_definition()
+    if node.isinstance(tree.ForStmt):
+        types = pep0484.find_type_from_comment_hint_for(evaluator, node, name)
         if types:
             return types
-    if typ.isinstance(tree.WithStmt):
-        types = pep0484.find_type_from_comment_hint_with(evaluator, typ, name)
+    if node.isinstance(tree.WithStmt):
+        types = pep0484.find_type_from_comment_hint_with(evaluator, node, name)
         if types:
             return types
-    if typ.type in ('for_stmt', 'comp_for'):
-        container_types = context.eval_node(typ.children[3])
-        for_types = iterable.py__iter__types(evaluator, container_types, typ.children[3])
+    if node.type in ('for_stmt', 'comp_for'):
+        container_types = context.eval_node(node.children[3])
+        for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
         types = check_tuple_assignments(evaluator, for_types, name)
-    elif isinstance(typ, tree.Param):
-        types = _eval_param(evaluator, context, typ, scope)
-    elif typ.isinstance(tree.ExprStmt):
-        types = _remove_statements(evaluator, context, typ, name)
-    elif typ.isinstance(tree.WithStmt):
-        types = evaluator.eval_element(typ.node_from_name(name))
-    elif isinstance(typ, tree.Import):
+    elif isinstance(node, tree.Param):
+        return set()  # TODO remove
+        types = _eval_param(evaluator, context, node, scope)
+    elif node.isinstance(tree.ExprStmt):
+        types = _remove_statements(evaluator, context, node, name)
+    elif node.isinstance(tree.WithStmt):
+        types = evaluator.eval_element(node.node_from_name(name))
+    elif isinstance(node, tree.Import):
         types = imports.ImportWrapper(evaluator, name).follow()
-    elif typ.isinstance(tree.Function, tree.Class):
-        types = [evaluator.wrap(typ, parent_context=context)]
-    elif typ.type == 'global_stmt':
-        for s in _get_global_stmt_scopes(evaluator, typ, name):
+    elif node.type in ('funcdef', 'classdef'):
+        types = _apply_decorators(evaluator, context, node)
+    elif node.type == 'global_stmt':
+        for s in _get_global_stmt_scopes(evaluator, node, name):
             finder = NameFinder(evaluator, s, str(name))
             names_dicts = finder.get_filters(search_global=True)
             # For global_stmt lookups, we only need the first possible scope,
             # which means the function itself.
             names_dicts = [next(names_dicts)]
             types += finder.find(names_dicts, attribute_lookup=False)
-    elif isinstance(typ, tree.TryStmt):
+    elif isinstance(node, tree.TryStmt):
         # TODO an exception can also be a tuple. Check for those.
         # TODO check for types that are not classes and add it to
         # the static analysis report.
         exceptions = evaluator.eval_element(name.get_previous_sibling().get_previous_sibling())
         types = set(chain.from_iterable(evaluator.execute(t) for t in exceptions))
     else:
-        if typ.isinstance(er.Function):
-            typ = typ.get_decorated_func()
-        types = set([typ])
+        raise DeprecationWarning
+        types = set([node])
     return types
+
+
+def _apply_decorators(evaluator, context, node):
+    """
+    Returns the function, that should to be executed in the end.
+    This is also the places where the decorators are processed.
+    """
+    decoratee_context = evaluator.wrap(node, parent_context=context)
+    initial = values = set([decoratee_context])
+    for dec in reversed(node.get_decorators()):
+        debug.dbg('decorator: %s %s', dec, values)
+        dec_values = context.eval_node(dec.children[1])
+        trailer = dec.children[2:-1]
+        if trailer:
+            # Create a trailer and evaluate it.
+            trailer = tree.Node('trailer', trailer)
+            trailer.parent = dec
+            dec_values = context.eval_trailer(dec_values, trailer)
+
+        if not len(dec_values):
+            debug.warning('decorator not found: %s on %s', dec, node)
+            return initial
+
+        values = unite(dec_value.execute(param.ValuesArguments([values]))
+                       for dec_value in dec_values)
+        if not len(values):
+            debug.warning('not possible to resolve wrappers found %s', node)
+            return initial
+
+    debug.dbg('decorator end %s', values)
+    return values
 
 
 def _remove_statements(evaluator, context, stmt, name):
     """
     This is the part where statements are being stripped.
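
Stripped of the tree and context machinery, the new _apply_decorators helper is the usual reversed fold over decorators: start from the plain definition, execute each decorator from the innermost outward on the current set of values, and fall back to the undecorated value as soon as a step cannot be resolved. A self-contained sketch of that control flow with plain callables (jedi itself operates on sets of contexts and lazily evaluated arguments):

def apply_decorators(func, decorators):
    initial = values = {func}
    for dec in reversed(decorators):       # innermost decorator is applied first
        new_values = set()
        for value in values:
            result = dec(value)            # "execute" the decorator on the value
            if result is None:
                # Analogue of "decorator not found" / "no wrappers found":
                # fall back to the undecorated definition instead of nothing.
                return initial
            new_values.add(result)
        values = new_values
    return values


def trace(f):
    def wrapper(*args, **kwargs):
        print('calling', f.__name__)
        return f(*args, **kwargs)
    return wrapper


def add(a, b):
    return a + b


decorated, = apply_decorators(add, [trace])
print(decorated(1, 2))  # prints "calling add", then 3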

View File

@@ -188,8 +188,7 @@ class InstanceClassFilter(ParserTreeFilter):
     def _filter(self, names):
         names = super(InstanceClassFilter, self)._filter(names)
-        return [get_instance_el(self._evaluator, self._context, name, True)
-                for name in names if self._access_possible(name)]
+        return [name for name in names if self._access_possible(name)]
 
     def _check_flows(self, names):
         return names
@@ -207,7 +206,7 @@ class SelfNameFilter(InstanceClassFilter):
     def _filter_self_names(self, names):
         for name in names:
             trailer = name.parent
-            if tree.is_node(trailer, 'trailer') \
+            if trailer.type == 'trailer' \
                     and len(trailer.children) == 2 \
                     and trailer.children[0] == '.':
                 if name.is_definition() and self._access_possible(name):
@@ -216,4 +215,4 @@ class SelfNameFilter(InstanceClassFilter):
             if init_execution is not None and \
                     init_execution.start_pos < name.start_pos < init_execution.end_pos:
                 name = init_execution.name_for_position(name.start_pos)
-            yield get_instance_el(self._evaluator, self._context, name)
+            yield name

View File

@@ -386,6 +386,7 @@ class ArrayLiteralContext(AbstractSequence, ArrayMixin):
         function returns the value for a certain index.
         """
         if self.type == 'dict':
+            raise NotImplementedError
             # Get keys.
             types = set()
             for k, _ in self._items():
@@ -442,9 +443,11 @@ class ArrayLiteralContext(AbstractSequence, ArrayMixin):
 class _FakeArray(ArrayLiteralContext):
     def __init__(self, evaluator, container, type):
-        self.type = type
+        # TODO is this class really needed?
+        self._array_type = type
         self._evaluator = evaluator
         self.atom = container
+        self.parent_context = evaluator.BUILTINS
 
 
 class ImplicitTuple(_FakeArray):
@@ -458,28 +461,25 @@ class ImplicitTuple(_FakeArray):
 class FakeSequence(_FakeArray):
-    def __init__(self, evaluator, type, context_sets):
+    def __init__(self, evaluator, array_type, lazy_context_list):
         """
         type should be one of "tuple", "list"
         """
-        super(FakeSequence, self).__init__(evaluator, context_sets, type)
-        self._context_sets = context_sets
-
-    def _resolve(self, context_set):
-        for x in context_set:
-            try:
-                infer = x.infer
-            except AttributeError:
-                yield x
-            else:
-                for value in infer():
-                    yield value
+        super(FakeSequence, self).__init__(evaluator, None, array_type)
+        self._lazy_context_list = lazy_context_list
 
     def _items(self):
-        return self._context_sets
+        raise DeprecationWarning
+        return self._context_list
 
     def py__getitem__(self, index):
-        return set(self._resolve(self._context_sets[index]))
+        return self._lazy_context_list[index].infer()
+
+    def py__iter__(self):
+        return self._lazy_context_list
+
+    def __repr__(self):
+        return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)
 
 
 def create_evaluated_sequence_set(evaluator, *types_order, **kwargs):
@@ -520,9 +520,10 @@ class FakeDict(_FakeArray):
         yield set(compiled.create(self._evaluator, key) for key in self._dct)
 
     def py__getitem__(self, index):
-        return unite(self._evaluator.eval_element(v) for v in self._dct[index])
+        return self._dct[index].infer()
 
     def _items(self):
+        raise DeprecationWarning
         for key, values in self._dct.items():
             # TODO this is not proper. The values could be multiple values?!
             yield key, values[0]
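
FakeSequence and FakeDict now store lazy contexts and only call infer() when an item is actually subscripted or iterated, instead of keeping pre-evaluated sets around. A toy version of the pattern with invented classes (not jedi's real ones, which wrap parser nodes and evaluate them through a Context):

class LazyKnownContext(object):
    def __init__(self, context):
        self._context = context

    def infer(self):
        return {self._context}


class FakeSequence(object):
    def __init__(self, array_type, lazy_context_list):
        self.array_type = array_type          # 'tuple' or 'list'
        self._lazy_context_list = lazy_context_list

    def py__getitem__(self, index):
        # Only the requested item is inferred.
        return self._lazy_context_list[index].infer()

    def py__iter__(self):
        return self._lazy_context_list

    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)


seq = FakeSequence('tuple', [LazyKnownContext('int'), LazyKnownContext('str')])
print(seq.py__getitem__(1))  # {'str'}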

View File

@@ -130,11 +130,13 @@ class TreeArguments(AbstractArguments):
         named_args = []
         for stars, el in self._split():
             if stars == 1:
-                arrays = self._evaluator.eval_element(self._context, el)
+                arrays = self._context.eval_node(el)
                 iterators = [_iterate_star_args(self._evaluator, a, el, func)
                              for a in arrays]
                 iterators = list(iterators)
                 for values in list(zip_longest(*iterators)):
+                    # TODO zip_longest yields None, that means this would raise
+                    # an exception?
                     yield None, context.get_merged_lazy_context(values)
             elif stars == 2:
                 arrays = self._evaluator.eval_element(self._context, el)
@@ -142,6 +144,7 @@ class TreeArguments(AbstractArguments):
                          for a in arrays]
                 for dct in dicts:
                     for key, values in dct.items():
+                        raise NotImplementedError
                         yield key, values
             else:
                 if tree.is_node(el, 'argument'):
@@ -177,19 +180,19 @@ class TreeArguments(AbstractArguments):
         return _get_calling_var_args(self._evaluator, self)
 
 
-class ValueArguments(AbstractArguments):
-    def __init__(self, value_list):
-        self._value_list = value_list
+class ValuesArguments(AbstractArguments):
+    def __init__(self, values_list):
+        self._values_list = values_list
 
     def unpack(self, func=None):
-        for value in self._value_list:
-            yield None, context.LazyKnownContext(value)
+        for values in self._values_list:
+            yield None, context.LazyKnownContexts(values)
 
     def get_calling_var_args(self):
         return None
 
     def __repr__(self):
-        return '<%s: %s>' % (type(self).__name__, self._value_list)
+        return '<%s: %s>' % (type(self).__name__, self._values_list)
 
 
 class ExecutedParam(object):
@@ -242,11 +245,10 @@ def get_params(evaluator, parent_context, func, var_args):
     for param in func.params:
         param_dict[str(param.name)] = param
     unpacked_va = list(var_args.unpack(func))
-    from jedi.evaluate.representation import InstanceElement
-    if isinstance(func, InstanceElement):
-        raise DeprecationWarning
-        # Include self at this place.
-        unpacked_va.insert(0, (None, [func.instance]))
+    from jedi.evaluate.instance import TreeInstance
+    if isinstance(parent_context, TreeInstance):
+        # Include the self parameter here.
+        unpacked_va.insert(0, (None, context.LazyKnownContext(parent_context)))
 
     var_arg_iterator = common.PushBackIterator(iter(unpacked_va))
     non_matching_keys = defaultdict(lambda: [])
@@ -291,16 +293,16 @@ def get_params(evaluator, parent_context, func, var_args):
             if param.stars == 1:
                 # *args param
-                values_list = []
+                lazy_context_list = []
                 if argument is not None:
-                    values_list.append([argument])
+                    lazy_context_list.append(argument)
                     for key, argument in var_arg_iterator:
                         # Iterate until a key argument is found.
                         if key:
                             var_arg_iterator.push_back((key, argument))
                             break
-                        values_list.append([argument])
-                seq = iterable.FakeSequence(evaluator, 'tuple', values_list)
+                        lazy_context_list.append(argument)
+                seq = iterable.FakeSequence(evaluator, 'tuple', lazy_context_list)
                 result_arg = context.LazyKnownContext(seq)
             elif param.stars == 2:
                 # **kwargs param
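
Two details of get_params are worth spelling out: for a call on an instance, the instance itself is inserted as the first positional argument (the self parameter), and any surplus positional arguments that reach a *args parameter are packed into a lazy tuple (the FakeSequence above). A rough sketch of that binding logic with ordinary Python values; the real code works on lazy contexts and a push-back iterator:

def bind_positional(param_names, star_param, args, instance=None):
    if instance is not None:
        # Bound method: the instance becomes the first positional argument.
        args = [instance] + list(args)
    bound = {}
    it = iter(args)
    for name in param_names:
        bound[name] = next(it)
    if star_param is not None:
        # Everything left over is packed into the *args tuple.
        bound[star_param] = tuple(it)
    return bound


print(bind_positional(['a'], 'rest', [1, 2, 3]))
# {'a': 1, 'rest': (2, 3)}
print(bind_positional(['self', 'x'], None, ['value'], instance='instance'))
# {'self': 'instance', 'x': 'value'}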

View File

@@ -522,68 +522,10 @@ class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext, Wrappe
""" """
Needed because of decorators. Decorators are evaluated here. Needed because of decorators. Decorators are evaluated here.
""" """
def __init__(self, evaluator, parent_context, func, is_decorated=False): def __init__(self, evaluator, parent_context, func):
""" This should not be called directly """ """ This should not be called directly """
super(FunctionContext, self).__init__(evaluator, parent_context) super(FunctionContext, self).__init__(evaluator, parent_context)
self.base = self.base_func = func self.base = self.base_func = func
self.is_decorated = is_decorated
# A property that is set by the decorator resolution.
self.decorates = None
@memoize_default()
def get_decorated_func(self):
"""
Returns the function, that should to be executed in the end.
This is also the places where the decorators are processed.
"""
f = self.base_func
decorators = self.base_func.get_decorators()
if not decorators or self.is_decorated:
return self
# Only enter it, if has not already been processed.
if not self.is_decorated:
for dec in reversed(decorators):
debug.dbg('decorator: %s %s', dec, f)
dec_results = self._evaluator.eval_element(dec.children[1])
trailer = dec.children[2:-1]
if trailer:
# Create a trailer and evaluate it.
trailer = tree.Node('trailer', trailer)
trailer.parent = dec
dec_results = self._evaluator.eval_trailer(dec_results, trailer)
if not len(dec_results):
debug.warning('decorator not found: %s on %s', dec, self.base_func)
return self
decorator = dec_results.pop()
if dec_results:
debug.warning('multiple decorators found %s %s',
self.base_func, dec_results)
# Create param array.
if isinstance(f, FunctionContext):
old_func = f # TODO this is just hacky. change.
elif f.type == 'funcdef':
old_func = FunctionContext(self._evaluator, f, is_decorated=True)
else:
old_func = f
wrappers = self._evaluator.execute_evaluated(decorator, old_func)
if not len(wrappers):
debug.warning('no wrappers found %s', self.base_func)
return self
if len(wrappers) > 1:
# TODO resolve issue with multiple wrappers -> multiple types
debug.warning('multiple wrappers found %s %s',
self.base_func, wrappers)
f = list(wrappers)[0]
if isinstance(f, (Instance, FunctionContext)):
f.decorates = self
debug.dbg('decorator end %s', f)
return f
def names_dicts(self, search_global): def names_dicts(self, search_global):
if search_global: if search_global:
@@ -624,10 +566,7 @@ class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext, Wrappe
         return compiled.get_special_object(self._evaluator, name)
 
     def __repr__(self):
-        dec = ''
-        if self.decorates is not None:
-            dec = " decorates " + repr(self.decorates)
-        return "<e%s of %s%s>" % (type(self).__name__, self.base_func, dec)
+        return "<e%s of %s>" % (type(self).__name__, self.base_func)
 
 
 class LambdaWrapper(FunctionContext):
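
With get_decorated_func gone, FunctionContext no longer knows anything about decoration; the module-level _apply_decorators shown earlier does that work during name resolution, processing decorators in reversed() order, i.e. innermost first. As an end-to-end illustration in plain user code (not jedi internals), this is the ordering the relocated logic has to reproduce:

def returns_list(func):
    def wrapper(*args, **kwargs):
        return [func(*args, **kwargs)]
    return wrapper


def returns_len(func):
    def wrapper(*args, **kwargs):
        return len(func(*args, **kwargs))
    return wrapper


@returns_len
@returns_list
def identity(x):
    return x


print(identity('abc'))  # 1: wrapped by returns_list first, then returns_len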