1
0
forked from VimPlug/jedi

List comprehensions now at least don't cause errors anymore.

This commit is contained in:
Dave Halter
2016-11-16 09:43:45 +01:00
parent f672b367da
commit af7c13d2e6
7 changed files with 65 additions and 53 deletions

View File

@@ -365,7 +365,7 @@ class Evaluator(object):
pass
if comp_for.type == 'comp_for':
-return set([iterable.Comprehension.from_atom(self, atom)])
+return set([iterable.Comprehension.from_atom(self, context, atom)])
return set([iterable.ArrayLiteralContext(self, context, atom)])
def eval_trailer(self, context, types, trailer):

View File

@@ -43,11 +43,6 @@ class Context(object):
def eval_stmt(self, stmt, seek_name=None):
return self.evaluator.eval_statement(self, stmt, seek_name)
-def py__getattribute__(self, name_str, position=None,
-search_global=False, is_goto=False):
-return self.evaluator.find_types(self, name_str, position,
-search_global, is_goto)
class TreeContext(Context):
pass

View File

@@ -114,7 +114,7 @@ class AbstractUsedNamesFilter(AbstractFilter):
super(AbstractUsedNamesFilter, self).__init__(origin_scope)
self._parser_scope = parser_scope
self._used_names = self._parser_scope.get_root_node().used_names
-self._context = context
+self.context = context
def get(self, name):
try:
@@ -125,7 +125,7 @@ class AbstractUsedNamesFilter(AbstractFilter):
return self._convert_names(self._filter(names))
def _convert_names(self, names):
-return [self.name_class(self._context, name) for name in names]
+return [self.name_class(self.context, name) for name in names]
def values(self):
return self._convert_names(name for name_list in self._used_names.values()
@@ -140,14 +140,16 @@ class ParserTreeFilter(AbstractUsedNamesFilter):
def _filter(self, names):
names = super(ParserTreeFilter, self)._filter(names)
names = [n for n in names if n.is_definition() and n.parent.type != 'trailer']
-names = [n for n in names if n.parent.get_parent_scope() == self._parser_scope]
+names = [n for n in names
+if ((n.parent if n.parent.type in ('classdef', 'funcdef') else n)
+.get_parent_scope() == self._parser_scope)]
return list(self._check_flows(names))
def _check_flows(self, names):
for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
check = flow_analysis.reachability_check(
-self._context, self._parser_scope, name, self._origin_scope
+self.context, self._parser_scope, name, self._origin_scope
)
if check is not flow_analysis.UNREACHABLE:
yield name
@@ -174,9 +176,9 @@ class FunctionExecutionFilter(ParserTreeFilter):
for name in names:
param = search_ancestor(name, 'param')
if param:
-yield self.param_name(self._context, name)
+yield self.param_name(self.context, name)
else:
-yield TreeNameDefinition(self._context, name)
+yield TreeNameDefinition(self.context, name)
class AnonymousInstanceFunctionExecutionFilter(FunctionExecutionFilter):

View File

@@ -130,7 +130,6 @@ class NameFinder(object):
analysis.add(self._evaluator, 'name-error', self.name_str,
message)
-debug.dbg('finder._names_to_types: %s -> %s', names, types)
return types
def get_filters(self, search_global=False):
@@ -245,6 +244,20 @@ class NameFinder(object):
"""
names = []
for filter in filters:
+if self._evaluator.predefined_if_name_dict_dict:
+node = self.name_str
+while node is not None and not isinstance(node, tree.IsScope):
+node = node.parent
+if node.type in ("if_stmt", "for_stmt", "comp_for"):
+try:
+name_dict = self._evaluator.predefined_if_name_dict_dict[node]
+types = set(name_dict[str(self.name_str)])
+except KeyError:
+continue
+else:
+self._found_predefined_if_name = types
+return []
+else:
names = filter.get(self.name_str)
if names:
break
@@ -313,6 +326,7 @@ class NameFinder(object):
else:
types |= set(new_types)
+debug.dbg('finder._names_to_types: %s -> %s', names, types)
if not names and isinstance(self.context, AbstractInstanceContext):
# handling __getattr__ / __getattribute__
return self._check_getattr(self.context)

View File

@@ -276,7 +276,7 @@ class InstanceClassFilter(filters.ParserTreeFilter):
def _equals_origin_scope(self):
node = self._origin_scope
while node is not None:
-if node == self._parser_scope or node == self._context:
+if node == self._parser_scope or node == self.context:
return True
node = node.get_parent_scope()
return False
@@ -293,7 +293,7 @@ class InstanceClassFilter(filters.ParserTreeFilter):
return names
def _convert_names(self, names):
-return [self.name_class(self._context, self._class_context, name) for name in names]
+return [self.name_class(self.context, self._class_context, name) for name in names]
class SelfNameFilter(InstanceClassFilter):
@@ -316,7 +316,7 @@ class SelfNameFilter(InstanceClassFilter):
if name.is_definition() and self._access_possible(name):
yield name
continue
-init_execution = self._context.get_init_function()
+init_execution = self.context.get_init_function()
# Hopefully we can somehow change this.
if init_execution is not None and \
init_execution.start_pos < name.start_pos < init_execution.end_pos:

View File

@@ -37,6 +37,9 @@ from jedi.evaluate import precedence
class AbstractSequence(context.Context):
+def __init__(self, evaluator):
+super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS)
def get_filters(self, search_global, until_position=None, origin_scope=None):
raise NotImplementedError
@@ -117,7 +120,7 @@ def register_builtin_method(method_name, python_version_match=None):
@has_builtin_methods
class GeneratorMixin(object):
-type = None
+array_type = None
@register_builtin_method('send')
@register_builtin_method('next', python_version_match=2)
@@ -145,7 +148,7 @@ class GeneratorMixin(object):
return gen_obj.py__class__()
-class Generator(context.Context, GeneratorMixin):
+class Generator(GeneratorMixin, context.Context):
"""Handling of `yield` functions."""
def __init__(self, evaluator, func_execution_context):
@@ -159,9 +162,9 @@ class Generator(context.Context, GeneratorMixin):
return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
-class Comprehension(object):
+class Comprehension(AbstractSequence):
@staticmethod
-def from_atom(evaluator, atom):
+def from_atom(evaluator, context, atom):
bracket = atom.children[0]
if bracket == '{':
if atom.children[1].children[1] == ':':
@@ -172,10 +175,11 @@ class Comprehension(object):
cls = GeneratorComprehension
elif bracket == '[':
cls = ListComprehension
-return cls(evaluator, atom)
+return cls(evaluator, context, atom)
-def __init__(self, evaluator, atom):
-self.evaluator = evaluator
+def __init__(self, evaluator, defining_context, atom):
+super(Comprehension, self).__init__(evaluator)
+self._defining_context = defining_context
self._atom = atom
def _get_comprehension(self):
@@ -209,20 +213,21 @@ class Comprehension(object):
evaluator = self.evaluator
comp_for = comp_fors[0]
input_node = comp_for.children[3]
-input_types = evaluator.eval_element(input_node)
+input_types = self._defining_context.eval_node(input_node)
iterated = py__iter__(evaluator, input_types, input_node)
exprlist = comp_for.children[1]
-for i, types in enumerate(iterated):
+for i, lazy_context in enumerate(iterated):
+types = lazy_context.infer()
evaluator.predefined_if_name_dict_dict[comp_for] = \
unpack_tuple_to_dict(evaluator, types, exprlist)
try:
for result in self._nested(comp_fors[1:]):
yield result
except IndexError:
-iterated = evaluator.eval_element(self._eval_node())
+iterated = self._defining_context.eval_node(self._eval_node())
if self.array_type == 'dict':
-yield iterated, evaluator.eval_element(self._eval_node(2))
+yield iterated, self._defining_context.eval_node(self._eval_node(2))
else:
yield iterated
finally:
@@ -236,8 +241,8 @@ class Comprehension(object):
yield result
def py__iter__(self):
raise NotImplementedError
-return self._iterate()
+for set_ in self._iterate():
+yield context.LazyKnownContexts(set_)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._atom)
@@ -292,28 +297,24 @@ class DICT(object):
return create_evaluated_sequence_set(self.evaluator, *items, sequence_type='list')
-class ListComprehension(Comprehension, ArrayMixin):
-type = 'list'
+class ListComprehension(ArrayMixin, Comprehension):
+array_type = 'list'
def py__getitem__(self, index):
-all_types = list(self.py__iter__())
-result = all_types[index]
if isinstance(index, slice):
-return create_evaluated_sequence_set(
-self.evaluator,
-unite(result),
-sequence_type='list'
-)
-return result
+return set([self])
+all_types = list(self.py__iter__())
+return all_types[index].infer()
-class SetComprehension(Comprehension, ArrayMixin):
-type = 'set'
+class SetComprehension(ArrayMixin, Comprehension):
+array_type = 'set'
@has_builtin_methods
-class DictComprehension(Comprehension, ArrayMixin):
-type = 'dict'
+class DictComprehension(ArrayMixin, Comprehension):
+array_type = 'dict'
def _get_comp_for(self):
return self._get_comprehension().children[3]
@@ -343,7 +344,7 @@ class DictComprehension(Comprehension, ArrayMixin):
return create_evaluated_sequence_set(self.evaluator, items, sequence_type='list')
-class GeneratorComprehension(Comprehension, GeneratorMixin):
+class GeneratorComprehension(GeneratorMixin, Comprehension):
pass
@@ -353,7 +354,7 @@ class ArrayLiteralContext(ArrayMixin, AbstractSequence):
'{': 'dict'}
def __init__(self, evaluator, defining_context, atom):
-super(ArrayLiteralContext, self).__init__(evaluator, evaluator.BUILTINS)
+super(ArrayLiteralContext, self).__init__(evaluator)
self.atom = atom
self._defining_context = defining_context
@@ -461,7 +462,6 @@ class _FakeArray(ArrayLiteralContext):
self.array_type = type
self.evaluator = evaluator
self.atom = container
-self.parent_context = evaluator.BUILTINS
class ImplicitTuple(_FakeArray):
@@ -585,7 +585,7 @@ def unpack_tuple_to_dict(evaluator, types, exprlist):
dct = {}
parts = iter(exprlist.children[::2])
n = 0
-for iter_types in py__iter__(evaluator, types, exprlist):
+for lazy_context in py__iter__(evaluator, types, exprlist):
n += 1
try:
part = next(parts)
@@ -593,7 +593,7 @@ def unpack_tuple_to_dict(evaluator, types, exprlist):
analysis.add(evaluator, 'value-error-too-many-values', part,
message="ValueError: too many values to unpack (expected %s)" % n)
else:
-dct.update(unpack_tuple_to_dict(evaluator, iter_types, part))
+dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
has_parts = next(parts, None)
if types and has_parts is not None:
analysis.add(evaluator, 'value-error-too-few-values', has_parts,
@@ -685,6 +685,7 @@ def py__getitem__(evaluator, context, types, trailer):
result |= py__iter__types(evaluator, set([typ]))
except KeyError:
# Must be a dict. Lists don't raise KeyErrors.
+raise
result |= typ.dict_values()
return result

View File

@@ -687,7 +687,7 @@ class FunctionExecutionContext(Executed):
parent = for_stmt.parent
if parent.type == 'suite':
parent = parent.parent
-if for_stmt.type == 'for_stmt' and parent == self.func_def \
+if for_stmt.type == 'for_stmt' and parent == self.funcdef \
and for_stmt.defines_one_name(): # Simplicity for now.
if for_stmt == last_for_stmt:
yields_order[-1][1].append(yield_)
@@ -709,7 +709,7 @@ class FunctionExecutionContext(Executed):
yield result
else:
input_node = for_stmt.get_input_node()
-for_types = evaluator.eval_element(input_node)
+for_types = self.eval_node(input_node)
ordered = iterable.py__iter__(evaluator, for_types, input_node)
for index_types in ordered:
dct = {str(for_stmt.children[1]): index_types}