diff --git a/jedi/api/classes.py b/jedi/api/classes.py index 9dd74746..142dc537 100644 --- a/jedi/api/classes.py +++ b/jedi/api/classes.py @@ -596,7 +596,7 @@ class Definition(BaseDefinition): if typ == 'instance': typ = 'class' # The description should be similar to Py objects. d = typ + ' ' + d.name.get_code() - elif isinstance(d, iterable.ArrayLiteralContext): + elif isinstance(d, iterable.SequenceLiteralContext): d = 'class ' + d.type elif isinstance(d, (tree.Class, er.ClassContext, er.Instance)): d = 'class ' + unicode(d.name) diff --git a/jedi/evaluate/__init__.py b/jedi/evaluate/__init__.py index acd137f6..82b631bd 100644 --- a/jedi/evaluate/__init__.py +++ b/jedi/evaluate/__init__.py @@ -281,7 +281,7 @@ class Evaluator(object): types = self.eval_trailer(context, types, trailer) elif element.type in ('testlist_star_expr', 'testlist',): # The implicit tuple in statements. - types = set([iterable.ArrayLiteralContext(self, context, element)]) + types = set([iterable.SequenceLiteralContext(self, context, element)]) elif element.type in ('not_test', 'factor'): types = self.eval_element(context, element.children[-1]) for operator in element.children[:-1]: @@ -368,7 +368,18 @@ class Evaluator(object): if comp_for.type == 'comp_for': return set([iterable.Comprehension.from_atom(self, context, atom)]) - return set([iterable.ArrayLiteralContext(self, context, atom)]) + + # It's a dict/list/tuple literal. 
+ array_node = c[1] + try: + array_node_c = array_node.children + except AttributeError: + array_node_c = [] + if c[0] == '{' and (array_node == '}' or ':' in array_node_c): + context = iterable.DictLiteralContext(self, context, atom) + else: + context = iterable.SequenceLiteralContext(self, context, atom) + return set([context]) def eval_trailer(self, context, types, trailer): trailer_op, node = trailer.children[:2] @@ -554,6 +565,9 @@ class Evaluator(object): return AnonymousInstance(self, parent_context, class_context) else: return class_context + elif scope_node.type == 'comp_for': + return iterable.CompForContext.from_comp_for(parent_context, scope_node) + raise Exception("There's a scope that was not managed.") base_node = base_context.get_node() diff --git a/jedi/evaluate/docstrings.py b/jedi/evaluate/docstrings.py index aaebba93..a76355fa 100644 --- a/jedi/evaluate/docstrings.py +++ b/jedi/evaluate/docstrings.py @@ -25,7 +25,7 @@ from jedi.evaluate.cache import memoize_default from jedi.parser import ParserWithRecovery, load_grammar from jedi.parser.tree import search_ancestor from jedi.common import indent_block -from jedi.evaluate.iterable import ArrayLiteralContext, FakeSequence, AlreadyEvaluated +from jedi.evaluate.iterable import SequenceLiteralContext, FakeSequence, AlreadyEvaluated DOCSTRING_PARAM_PATTERNS = [ @@ -173,7 +173,7 @@ def _execute_array_values(evaluator, array): Tuples indicate that there's not just one return value, but the listed ones. `(str, int)` means that it returns a tuple with both types. 
""" - if isinstance(array, ArrayLiteralContext): + if isinstance(array, SequenceLiteralContext): values = [] for lazy_context in array.py__iter__(): objects = unite(_execute_array_values(evaluator, typ) for typ in lazy_context.infer()) @@ -194,7 +194,7 @@ def follow_param(module_context, param): types = eval_docstring(func.raw_doc) if func.name.value == '__init__': cls = search_ancestor(func, 'classdef') - if cls.type == 'classdef': + if cls is not None: types |= eval_docstring(cls.raw_doc) return types diff --git a/jedi/evaluate/helpers.py b/jedi/evaluate/helpers.py index 7a81935b..7ec43628 100644 --- a/jedi/evaluate/helpers.py +++ b/jedi/evaluate/helpers.py @@ -100,25 +100,22 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False): else: cut = index + 1 - values = context.eval_node(power.children[0]) - for trailer in power.children[1:cut]: - values = context.eval_trailer(values, trailer) - return values - - # TODO delete - ''' - if new_power.type == 'error_node': + if power.type == 'error_node': start = index while True: start -= 1 - if new_power.children[start].type != 'trailer': + base = power.children[start] + if base.type != 'trailer': break - transformed = tree.Node('power', new_power.children[start:]) - transformed.parent = new_power.parent - return transformed + trailers = power.children[start + 1: index + 1] + else: + base = power.children[0] + trailers = power.children[1:cut] - return new_power - ''' + values = context.eval_node(base) + for trailer in trailers: + values = context.eval_trailer(values, trailer) + return values def call_of_leaf(leaf): diff --git a/jedi/evaluate/iterable.py b/jedi/evaluate/iterable.py index fbfd0c8a..bf862e0f 100644 --- a/jedi/evaluate/iterable.py +++ b/jedi/evaluate/iterable.py @@ -38,6 +38,8 @@ from jedi.evaluate import precedence class AbstractSequence(context.Context): + builtin_methods = {} + def __init__(self, evaluator): super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS) @@ -101,7 +103,16 
@@ class SpecialMethodFilter(DictFilter): def has_builtin_methods(cls): - cls.builtin_methods = {} + base_dct = {} + # Need to care properly about inheritance. Builtin Methods should not get + # lost, just because they are not mentioned in a class. + for base_cls in reversed(cls.__bases__): + try: + base_dct.update(base_cls.builtin_methods) + except AttributeError: + pass + + cls.builtin_methods = base_dct for func in cls.__dict__.values(): try: cls.builtin_methods.update(func.registered_builtin_methods) @@ -226,10 +237,7 @@ class Comprehension(AbstractSequence): @memoize_default() def _get_comp_for_context(self, parent_context, comp_for): - return CompForContext.from_comp_for( - parent_context, - comp_for, - ) + return parent_context.create_context(comp_for) def _nested(self, comp_fors, parent_context=None): evaluator = self.evaluator @@ -273,7 +281,6 @@ class Comprehension(AbstractSequence): return "<%s of %s>" % (type(self).__name__, self._atom) -@has_builtin_methods class ArrayMixin(object): @memoize_default() def names_dicts(self, search_global=False): # Always False. 
@@ -308,20 +315,6 @@ class ArrayMixin(object): return unite(self._defining_context.eval_node(v) for k, v in self._items()) -class DICT(object): - @register_builtin_method('values') - def _imitate_values(self): - items = self.dict_values() - return create_evaluated_sequence_set(self.evaluator, items, sequence_type='list') - - @register_builtin_method('items') - def _imitate_items(self): - items = [set([FakeSequence(self.evaluator, (k, v), 'tuple')]) - for k, v in self._items()] - - return create_evaluated_sequence_set(self.evaluator, *items, sequence_type='list') - - class ListComprehension(ArrayMixin, Comprehension): array_type = 'list' @@ -345,9 +338,8 @@ class DictComprehension(ArrayMixin, Comprehension): return self._get_comprehension().children[3] def py__iter__(self): - raise NotImplementedError for keys, values in self._iterate(): - yield keys + yield context.LazyKnownContexts(keys) def py__getitem__(self, index): for keys, values in self._iterate(): @@ -360,11 +352,20 @@ def dict_values(self): return unite(values for keys, values in self._iterate()) + @register_builtin_method('values') + def _imitate_values(self): + lazy_context = context.LazyKnownContexts(self.dict_values()) + return set([FakeSequence(self.evaluator, 'list', [lazy_context])]) + + @register_builtin_method('items') def _imitate_items(self): - items = set(FakeSequence(self.evaluator, - (AlreadyEvaluated(keys), AlreadyEvaluated(values)), 'tuple') - for keys, values in self._iterate()) + items = set( + FakeSequence( + self.evaluator, 'tuple', + (context.LazyKnownContexts(keys), context.LazyKnownContexts(values)) + ) for keys, values in self._iterate() + ) - return create_evaluated_sequence_set(self.evaluator, items, sequence_type='list') + return set([FakeSequence(self.evaluator, 'list', + [context.LazyKnownContext(item) for item in items])]) @@ -373,28 +373,22 @@ class GeneratorComprehension(GeneratorMixin, Comprehension): pass -class ArrayLiteralContext(ArrayMixin, AbstractSequence): +class SequenceLiteralContext(ArrayMixin, AbstractSequence): mapping 
= {'(': 'tuple', '[': 'list', - '{': 'dict'} + '{': 'set'} def __init__(self, evaluator, defining_context, atom): - super(ArrayLiteralContext, self).__init__(evaluator) + super(SequenceLiteralContext, self).__init__(evaluator) self.atom = atom self._defining_context = defining_context if self.atom.type in ('testlist_star_expr', 'testlist'): self.array_type = 'tuple' else: - self.array_type = ArrayLiteralContext.mapping[atom.children[0]] + self.array_type = SequenceLiteralContext.mapping[atom.children[0]] """The builtin name of the array (list, set, tuple or dict).""" - c = self.atom.children - array_node = c[1] - if self.array_type == 'dict' and array_node != '}' \ - and (not hasattr(array_node, 'children') or ':' not in array_node.children): - self.array_type = 'set' - def py__getitem__(self, index): """Here the index is an int/str. Raises IndexError/KeyError.""" if self.array_type == 'dict': @@ -469,7 +463,7 @@ class ArrayLiteralContext(ArrayMixin, AbstractSequence): def exact_key_items(self): """ Returns a generator of tuples like dict.items(), where the key is - resolved (as a string) and the values are still LazyContexts. + resolved (as a string) and the values are still lazy contexts. 
""" for key_node, value in self._items(): for key in self._defining_context.eval_node(key_node): @@ -480,9 +474,36 @@ class ArrayLiteralContext(ArrayMixin, AbstractSequence): return "<%s of %s>" % (self.__class__.__name__, self.atom) -class _FakeArray(ArrayLiteralContext): +@has_builtin_methods +class DictLiteralContext(SequenceLiteralContext): + array_type = 'dict' + + def __init__(self, evaluator, defining_context, atom): + super(SequenceLiteralContext, self).__init__(evaluator) + self._defining_context = defining_context + self.atom = atom + + @register_builtin_method('values') + def _imitate_values(self): + lazy_context = context.LazyKnownContexts(self.dict_values()) + return FakeSequence(self.evaluator, 'list', [lazy_context]) + + @register_builtin_method('items') + def _imitate_items(self): + lazy_contexts = [ + context.LazyKnownContext(FakeSequence( + self.evaluator, 'tuple', + (context.LazyTreeContext(self._defining_context, key_node), + context.LazyTreeContext(self._defining_context, value_node)) + )) for key_node, value_node in self._items() + ] + + return set([FakeSequence(self.evaluator, 'list', lazy_contexts)]) + + +class _FakeArray(SequenceLiteralContext): def __init__(self, evaluator, container, type): - super(ArrayLiteralContext, self).__init__(evaluator) + super(SequenceLiteralContext, self).__init__(evaluator) self.array_type = type self.atom = container # TODO is this class really needed? @@ -520,18 +541,6 @@ class FakeSequence(_FakeArray): return "<%s of %s>" % (type(self).__name__, self._lazy_context_list) -def create_evaluated_sequence_set(evaluator, *types_order, **kwargs): - """ - ``sequence_type`` is a named argument, that doesn't work in Python2. For backwards - compatibility reasons, we're now using kwargs. 
- """ - sequence_type = kwargs.pop('sequence_type') - assert not kwargs - - sets = tuple(AlreadyEvaluated(types) for types in types_order) - return set([FakeSequence(evaluator, sets, sequence_type)]) - - class AlreadyEvaluated(frozenset): """A simple container to add already evaluated objects to an array.""" def __init__(self, *args, **kwargs):