Use ContextSet closer to the way Python's set works

This commit is contained in:
Dave Halter
2018-09-24 20:21:29 +02:00
parent 8fad33b125
commit 75a02a13d9
22 changed files with 114 additions and 128 deletions

View File

@@ -155,7 +155,7 @@ class Script(object):
string_names=names, string_names=names,
code_lines=self._code_lines, code_lines=self._code_lines,
) )
self._evaluator.module_cache.add(names, ContextSet(module)) self._evaluator.module_cache.add(names, ContextSet([module]))
return module return module
def __repr__(self): def __repr__(self):

View File

@@ -12,18 +12,10 @@ class BaseContext(object):
class BaseContextSet(object): class BaseContextSet(object):
def __init__(self, *args): def __init__(self, iterable):
if len(args) == 1 and hasattr(args[0], '__iter__'): self._set = frozenset(iterable)
# TODO replace with this everywhere. for context in iterable:
self._set = frozenset(args[0]) assert not isinstance(context, BaseContextSet)
return
for arg in args:
assert not isinstance(arg, BaseContextSet)
self._set = frozenset(args)
@classmethod
def from_iterable(cls, iterable):
return cls.from_set(set(iterable))
@classmethod @classmethod
def _from_frozen_set(cls, frozenset_): def _from_frozen_set(cls, frozenset_):
@@ -31,11 +23,6 @@ class BaseContextSet(object):
self._set = frozenset_ self._set = frozenset_
return self return self
# TODO remove this function
@classmethod
def from_set(cls, set_):
return cls(*set_)
@classmethod @classmethod
def from_sets(cls, sets): def from_sets(cls, sets):
""" """
@@ -69,7 +56,7 @@ class BaseContextSet(object):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set)) return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set))
def filter(self, filter_func): def filter(self, filter_func):
return self.from_iterable(filter(filter_func, self._set)) return self.__class__(filter(filter_func, self._set))
def __getattr__(self, name): def __getattr__(self, name):
def mapper(*args, **kwargs): def mapper(*args, **kwargs):

View File

@@ -220,14 +220,14 @@ class Evaluator(object):
new_name_dicts = list(original_name_dicts) new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts): for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy() new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = ContextSet(definition) new_name_dicts[i][if_name.value] = ContextSet([definition])
name_dicts += new_name_dicts name_dicts += new_name_dicts
else: else:
for name_dict in name_dicts: for name_dict in name_dicts:
name_dict[if_name.value] = definitions name_dict[if_name.value] = definitions
if len(name_dicts) > 1: if len(name_dicts) > 1:
result = ContextSet() result = NO_CONTEXTS
for name_dict in name_dicts: for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict): with helpers.predefine_names(context, if_stmt, name_dict):
result |= eval_node(context, element) result |= eval_node(context, element)

View File

@@ -81,7 +81,7 @@ def _iterate_argument_clinic(evaluator, arguments, parameters):
break break
lazy_contexts.append(argument) lazy_contexts.append(argument)
yield ContextSet(iterable.FakeSequence(evaluator, u'tuple', lazy_contexts)) yield ContextSet([iterable.FakeSequence(evaluator, u'tuple', lazy_contexts)])
lazy_contexts lazy_contexts
continue continue
elif stars == 2: elif stars == 2:

View File

@@ -264,7 +264,7 @@ def _getitem(context, index_contexts, contextualized_node):
# The actual getitem call. # The actual getitem call.
simple_getitem = getattr(context, 'py__simple_getitem__', None) simple_getitem = getattr(context, 'py__simple_getitem__', None)
result = ContextSet() result = NO_CONTEXTS
unused_contexts = set() unused_contexts = set()
for index_context in index_contexts: for index_context in index_contexts:
if simple_getitem is not None: if simple_getitem is not None:
@@ -293,7 +293,7 @@ def _getitem(context, index_contexts, contextualized_node):
# all results. # all results.
if unused_contexts or not index_contexts: if unused_contexts or not index_contexts:
result |= context.py__getitem__( result |= context.py__getitem__(
ContextSet.from_set(unused_contexts), ContextSet(unused_contexts),
contextualized_node contextualized_node
) )
debug.dbg('py__getitem__ result: %s', result) debug.dbg('py__getitem__ result: %s', result)
@@ -302,7 +302,7 @@ def _getitem(context, index_contexts, contextualized_node):
class ContextSet(BaseContextSet): class ContextSet(BaseContextSet):
def py__class__(self): def py__class__(self):
return ContextSet.from_iterable(c.py__class__() for c in self._set) return ContextSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False): def iterate(self, contextualized_node=None, is_async=False):
from jedi.evaluate.lazy_context import get_merged_lazy_context from jedi.evaluate.lazy_context import get_merged_lazy_context
@@ -322,7 +322,7 @@ class ContextSet(BaseContextSet):
return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name): def try_merge(self, function_name):
context_set = self.__class__() context_set = self.__class__([])
for c in self._set: for c in self._set:
try: try:
method = getattr(c, function_name) method = getattr(c, function_name)
@@ -333,11 +333,11 @@ class ContextSet(BaseContextSet):
return context_set return context_set
NO_CONTEXTS = ContextSet() NO_CONTEXTS = ContextSet([])
def iterator_to_context_set(func): def iterator_to_context_set(func):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
return ContextSet.from_iterable(func(*args, **kwargs)) return ContextSet(func(*args, **kwargs))
return wrapper return wrapper

View File

@@ -9,7 +9,7 @@ from jedi._compatibility import force_unicode, Parameter
from jedi.cache import underscore_memoization, memoize_method from jedi.cache import underscore_memoization, memoize_method
from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \ from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
ContextNameMixin ContextNameMixin
from jedi.evaluate.base_context import Context, ContextSet from jedi.evaluate.base_context import Context, ContextSet, NO_CONTEXTS
from jedi.evaluate.lazy_context import LazyKnownContext from jedi.evaluate.lazy_context import LazyKnownContext
from jedi.evaluate.compiled.access import _sentinel from jedi.evaluate.compiled.access import _sentinel
from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate.cache import evaluator_function_cache
@@ -61,11 +61,11 @@ class CompiledObject(Context):
).py__call__(arguments=arguments) ).py__call__(arguments=arguments)
if self.access_handle.is_class(): if self.access_handle.is_class():
from jedi.evaluate.context import CompiledInstance from jedi.evaluate.context import CompiledInstance
return ContextSet( return ContextSet([
CompiledInstance(self.evaluator, self.parent_context, self, arguments) CompiledInstance(self.evaluator, self.parent_context, self, arguments)
) ])
else: else:
return ContextSet.from_iterable(self._execute_function(arguments)) return ContextSet(self._execute_function(arguments))
@CheckAttribute() @CheckAttribute()
def py__class__(self): def py__class__(self):
@@ -157,13 +157,13 @@ class CompiledObject(Context):
with reraise_getitem_errors(IndexError, KeyError, TypeError): with reraise_getitem_errors(IndexError, KeyError, TypeError):
access = self.access_handle.py__simple_getitem__(index) access = self.access_handle.py__simple_getitem__(index)
if access is None: if access is None:
return ContextSet() return NO_CONTEXTS
return ContextSet(create_from_access_path(self.evaluator, access)) return ContextSet([create_from_access_path(self.evaluator, access)])
@CheckAttribute() @CheckAttribute()
def py__getitem__(self, index_context_set, contextualized_node): def py__getitem__(self, index_context_set, contextualized_node):
return ContextSet.from_iterable( return ContextSet(
create_from_access_path(self.evaluator, access) create_from_access_path(self.evaluator, access)
for access in self.access_handle.py__getitem__all_values() for access in self.access_handle.py__getitem__all_values()
) )
@@ -243,9 +243,9 @@ class CompiledName(AbstractNameDefinition):
@underscore_memoization @underscore_memoization
def infer(self): def infer(self):
return ContextSet(create_from_name( return ContextSet([create_from_name(
self._evaluator, self.parent_context, self.string_name self._evaluator, self.parent_context, self.string_name
)) )])
class SignatureParamName(AbstractNameDefinition): class SignatureParamName(AbstractNameDefinition):
@@ -268,9 +268,9 @@ class SignatureParamName(AbstractNameDefinition):
def infer(self): def infer(self):
p = self._signature_param p = self._signature_param
evaluator = self.parent_context.evaluator evaluator = self.parent_context.evaluator
contexts = ContextSet() contexts = NO_CONTEXTS
if p.has_default: if p.has_default:
contexts = ContextSet(create_from_access_path(evaluator, p.default)) contexts = ContextSet([create_from_access_path(evaluator, p.default)])
if p.has_annotation: if p.has_annotation:
annotation = create_from_access_path(evaluator, p.annotation) annotation = create_from_access_path(evaluator, p.annotation)
contexts |= execute_evaluated(annotation) contexts |= execute_evaluated(annotation)
@@ -288,7 +288,7 @@ class UnresolvableParamName(AbstractNameDefinition):
return Parameter.POSITIONAL_ONLY return Parameter.POSITIONAL_ONLY
def infer(self): def infer(self):
return ContextSet() return NO_CONTEXTS
class CompiledContextName(ContextNameMixin, AbstractNameDefinition): class CompiledContextName(ContextNameMixin, AbstractNameDefinition):
@@ -309,7 +309,7 @@ class EmptyCompiledName(AbstractNameDefinition):
self.string_name = name self.string_name = name
def infer(self): def infer(self):
return ContextSet() return NO_CONTEXTS
class CompiledObjectFilter(AbstractFilter): class CompiledObjectFilter(AbstractFilter):

View File

@@ -82,9 +82,9 @@ class MixedName(compiled.CompiledName):
access_handle = self.parent_context.access_handle access_handle = self.parent_context.access_handle
# TODO use logic from compiled.CompiledObjectFilter # TODO use logic from compiled.CompiledObjectFilter
access_handle = access_handle.getattr(self.string_name, default=None) access_handle = access_handle.getattr(self.string_name, default=None)
return ContextSet( return ContextSet([
_create(self._evaluator, access_handle, parent_context=self.parent_context) _create(self._evaluator, access_handle, parent_context=self.parent_context)
) ])
@property @property
def api_type(self): def api_type(self):
@@ -213,7 +213,7 @@ def _create(evaluator, access_handle, parent_context, *args):
code_lines=code_lines, code_lines=code_lines,
) )
if name is not None: if name is not None:
evaluator.module_cache.add(string_names, ContextSet(module_context)) evaluator.module_cache.add(string_names, ContextSet([module_context]))
tree_context = module_context.create_context( tree_context = module_context.create_context(
tree_node, tree_node,

View File

@@ -34,7 +34,7 @@ class LambdaName(AbstractNameDefinition):
return self._lambda_context.tree_node.start_pos return self._lambda_context.tree_node.start_pos
def infer(self): def infer(self):
return ContextSet(self._lambda_context) return ContextSet([self._lambda_context])
class AbstractFunction(TreeContext): class AbstractFunction(TreeContext):
@@ -195,7 +195,7 @@ class FunctionExecutionContext(TreeContext):
children = r.children children = r.children
except AttributeError: except AttributeError:
ctx = compiled.builtin_from_name(self.evaluator, u'None') ctx = compiled.builtin_from_name(self.evaluator, u'None')
context_set |= ContextSet(ctx) context_set |= ContextSet([ctx])
else: else:
context_set |= self.eval_node(children[1]) context_set |= self.eval_node(children[1])
if check is flow_analysis.REACHABLE: if check is flow_analysis.REACHABLE:
@@ -341,7 +341,7 @@ class FunctionExecutionContext(TreeContext):
).execute_annotation() ).execute_annotation()
else: else:
if is_generator: if is_generator:
return ContextSet(iterable.Generator(evaluator, self)) return ContextSet([iterable.Generator(evaluator, self)])
else: else:
return self.get_return_values() return self.get_return_values()

View File

@@ -25,7 +25,7 @@ class InstanceExecutedParam(object):
self.string_name = self._tree_param.name.value self.string_name = self._tree_param.name.value
def infer(self): def infer(self):
return ContextSet(self._instance) return ContextSet([self._instance])
def matches_signature(self): def matches_signature(self):
return True return True
@@ -120,7 +120,7 @@ class AbstractInstanceContext(Context):
none_obj = compiled.builtin_from_name(self.evaluator, u'None') none_obj = compiled.builtin_from_name(self.evaluator, u'None')
return self.execute_function_slots(names, none_obj, obj) return self.execute_function_slots(names, none_obj, obj)
else: else:
return ContextSet(self) return ContextSet([self])
def get_filters(self, search_global=None, until_position=None, def get_filters(self, search_global=None, until_position=None,
origin_scope=None, include_self_names=True): origin_scope=None, include_self_names=True):

View File

@@ -44,7 +44,7 @@ from jedi.parser_utils import get_comp_fors
class IterableMixin(object): class IterableMixin(object):
def py__stop_iteration_returns(self): def py__stop_iteration_returns(self):
return ContextSet(compiled.builtin_from_name(self.evaluator, u'None')) return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')])
class GeneratorBase(BuiltinOverwrite, IterableMixin): class GeneratorBase(BuiltinOverwrite, IterableMixin):
@@ -210,7 +210,7 @@ class Sequence(BuiltinOverwrite, IterableMixin):
def py__getitem__(self, index_context_set, contextualized_node): def py__getitem__(self, index_context_set, contextualized_node):
if self.array_type == 'dict': if self.array_type == 'dict':
return self._dict_values() return self._dict_values()
return iterate_contexts(ContextSet(self)) return iterate_contexts(ContextSet([self]))
class ListComprehension(ComprehensionMixin, Sequence): class ListComprehension(ComprehensionMixin, Sequence):
@@ -218,7 +218,7 @@ class ListComprehension(ComprehensionMixin, Sequence):
def py__simple_getitem__(self, index): def py__simple_getitem__(self, index):
if isinstance(index, slice): if isinstance(index, slice):
return ContextSet(self) return ContextSet([self])
all_types = list(self.py__iter__()) all_types = list(self.py__iter__())
with reraise_getitem_errors(IndexError, TypeError): with reraise_getitem_errors(IndexError, TypeError):
@@ -254,7 +254,7 @@ class DictComprehension(ComprehensionMixin, Sequence):
@publish_method('values') @publish_method('values')
def _imitate_values(self): def _imitate_values(self):
lazy_context = LazyKnownContexts(self._dict_values()) lazy_context = LazyKnownContexts(self._dict_values())
return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])
@publish_method('items') @publish_method('items')
def _imitate_items(self): def _imitate_items(self):
@@ -270,7 +270,7 @@ class DictComprehension(ComprehensionMixin, Sequence):
for key, value in self._iterate() for key, value in self._iterate()
] ]
return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts)) return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])
class GeneratorComprehension(ComprehensionMixin, GeneratorBase): class GeneratorComprehension(ComprehensionMixin, GeneratorBase):
@@ -310,7 +310,7 @@ class SequenceLiteralContext(Sequence):
raise SimpleGetItemNotFound('No key found in dictionary %s.' % self) raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)
if isinstance(index, slice): if isinstance(index, slice):
return ContextSet(self) return ContextSet([self])
else: else:
with reraise_getitem_errors(TypeError, KeyError, IndexError): with reraise_getitem_errors(TypeError, KeyError, IndexError):
node = self.get_tree_entries()[index] node = self.get_tree_entries()[index]
@@ -323,7 +323,7 @@ class SequenceLiteralContext(Sequence):
""" """
if self.array_type == u'dict': if self.array_type == u'dict':
# Get keys. # Get keys.
types = ContextSet() types = NO_CONTEXTS
for k, _ in self.get_tree_entries(): for k, _ in self.get_tree_entries():
types |= self._defining_context.eval_node(k) types |= self._defining_context.eval_node(k)
# We don't know which dict index comes first, therefore always # We don't know which dict index comes first, therefore always
@@ -417,7 +417,7 @@ class DictLiteralContext(SequenceLiteralContext):
@publish_method('values') @publish_method('values')
def _imitate_values(self): def _imitate_values(self):
lazy_context = LazyKnownContexts(self._dict_values()) lazy_context = LazyKnownContexts(self._dict_values())
return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])
@publish_method('items') @publish_method('items')
def _imitate_items(self): def _imitate_items(self):
@@ -429,7 +429,7 @@ class DictLiteralContext(SequenceLiteralContext):
)) for key_node, value_node in self.get_tree_entries() )) for key_node, value_node in self.get_tree_entries()
] ]
return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts)) return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])
def _dict_keys(self): def _dict_keys(self):
return ContextSet.from_sets( return ContextSet.from_sets(
@@ -503,10 +503,10 @@ class FakeDict(_FakeArray):
@publish_method('values') @publish_method('values')
def _values(self): def _values(self):
return ContextSet(FakeSequence( return ContextSet([FakeSequence(
self.evaluator, u'tuple', self.evaluator, u'tuple',
[LazyKnownContexts(self._dict_values())] [LazyKnownContexts(self._dict_values())]
)) )])
def _dict_values(self): def _dict_values(self):
return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values()) return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values())
@@ -601,7 +601,7 @@ def _check_array_additions(context, sequence):
module_context = context.get_root_context() module_context = context.get_root_context()
if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject): if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
debug.dbg('Dynamic array search aborted.', color='MAGENTA') debug.dbg('Dynamic array search aborted.', color='MAGENTA')
return ContextSet() return NO_CONTEXTS
def find_additions(context, arglist, add_name): def find_additions(context, arglist, add_name):
params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack()) params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack())
@@ -673,7 +673,7 @@ def get_dynamic_array_instance(instance, arguments):
"""Used for set() and list() instances.""" """Used for set() and list() instances."""
ai = _ArrayInstance(instance, arguments) ai = _ArrayInstance(instance, arguments)
from jedi.evaluate import arguments from jedi.evaluate import arguments
return arguments.ValuesArguments([ContextSet(ai)]) return arguments.ValuesArguments([ContextSet([ai])])
class _ArrayInstance(object): class _ArrayInstance(object):

View File

@@ -197,7 +197,7 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
def py__call__(self, arguments): def py__call__(self, arguments):
from jedi.evaluate.context import TreeInstance from jedi.evaluate.context import TreeInstance
return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, arguments)) return ContextSet([TreeInstance(self.evaluator, self.parent_context, self, arguments)])
def py__class__(self): def py__class__(self):
return compiled.builtin_from_name(self.evaluator, u'type') return compiled.builtin_from_name(self.evaluator, u'type')
@@ -263,8 +263,8 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
def py__getitem__(self, index_context_set, contextualized_node): def py__getitem__(self, index_context_set, contextualized_node):
from jedi.evaluate.context.typing import AnnotatedClass from jedi.evaluate.context.typing import AnnotatedClass
if not index_context_set: if not index_context_set:
return ContextSet(self) return ContextSet([self])
return ContextSet.from_iterable( return ContextSet(
AnnotatedClass( AnnotatedClass(
self.evaluator, self.evaluator,
self.parent_context, self.parent_context,

View File

@@ -39,7 +39,7 @@ class TypingName(AbstractTreeName):
self._context = context self._context = context
def infer(self): def infer(self):
return ContextSet(self._context) return ContextSet([self._context])
class _BaseTypingContext(Context): class _BaseTypingContext(Context):
@@ -72,7 +72,7 @@ class _BaseTypingContext(Context):
class TypingModuleName(NameWrapper): class TypingModuleName(NameWrapper):
def infer(self): def infer(self):
return ContextSet.from_iterable(self._remap()) return ContextSet(self._remap())
def _remap(self): def _remap(self):
name = self.string_name name = self.string_name
@@ -157,22 +157,22 @@ class TypingContextWithIndex(_WithIndexBase):
# Optional is basically just saying it's either None or the actual # Optional is basically just saying it's either None or the actual
# type. # type.
return self._execute_annotations_for_all_indexes() \ return self._execute_annotations_for_all_indexes() \
| ContextSet(builtin_from_name(self.evaluator, u'None')) | ContextSet([builtin_from_name(self.evaluator, u'None')])
elif string_name == 'Type': elif string_name == 'Type':
# The type is actually already given in the index_context # The type is actually already given in the index_context
return ContextSet(self._index_context) return ContextSet([self._index_context])
elif string_name == 'ClassVar': elif string_name == 'ClassVar':
# For now don't do anything here, ClassVars are always used. # For now don't do anything here, ClassVars are always used.
return self._index_context.execute_annotation() return self._index_context.execute_annotation()
cls = globals()[string_name] cls = globals()[string_name]
return ContextSet(cls( return ContextSet([cls(
self.evaluator, self.evaluator,
self.parent_context, self.parent_context,
self._tree_name, self._tree_name,
self._index_context, self._index_context,
self._context_of_index self._context_of_index
)) )])
class TypingContext(_BaseTypingContext): class TypingContext(_BaseTypingContext):
@@ -180,7 +180,7 @@ class TypingContext(_BaseTypingContext):
py__simple_getitem__ = None py__simple_getitem__ = None
def py__getitem__(self, index_context_set, contextualized_node): def py__getitem__(self, index_context_set, contextualized_node):
return ContextSet.from_iterable( return ContextSet(
self.index_class.create_cached( self.index_class.create_cached(
self.evaluator, self.evaluator,
self.parent_context, self.parent_context,
@@ -210,7 +210,7 @@ def _iter_over_arguments(maybe_tuple_context, defining_context):
for lazy_context in maybe_tuple_context.py__iter__(): for lazy_context in maybe_tuple_context.py__iter__():
yield lazy_context.infer() yield lazy_context.infer()
else: else:
yield ContextSet(maybe_tuple_context) yield ContextSet([maybe_tuple_context])
def resolve_forward_references(context_set): def resolve_forward_references(context_set):
for context in context_set: for context in context_set:
@@ -224,7 +224,7 @@ def _iter_over_arguments(maybe_tuple_context, defining_context):
yield context yield context
for context_set in iterate(): for context_set in iterate():
yield ContextSet.from_iterable(resolve_forward_references(context_set)) yield ContextSet(resolve_forward_references(context_set))
class TypeAlias(HelperContextMixin): class TypeAlias(HelperContextMixin):
@@ -341,13 +341,13 @@ class TypeVarClass(_BaseTypingContext):
debug.warning('Found a variable without a name %s', arguments) debug.warning('Found a variable without a name %s', arguments)
return NO_CONTEXTS return NO_CONTEXTS
return ContextSet(TypeVar.create_cached( return ContextSet([TypeVar.create_cached(
self.evaluator, self.evaluator,
self.parent_context, self.parent_context,
self._tree_name, self._tree_name,
var_name, var_name,
unpacked unpacked
)) )])
def _find_string_name(self, lazy_context): def _find_string_name(self, lazy_context):
if lazy_context is None: if lazy_context is None:
@@ -451,7 +451,7 @@ class BoundTypeVarName(AbstractNameDefinition):
yield constraint yield constraint
else: else:
yield context yield context
return ContextSet.from_iterable(iter_()) return ContextSet(iter_())
def py__name__(self): def py__name__(self):
return self._type_var.py__name__() return self._type_var.py__name__()
@@ -530,7 +530,7 @@ class _AbstractAnnotatedClass(ClassContext):
def py__call__(self, arguments): def py__call__(self, arguments):
instance, = super(_AbstractAnnotatedClass, self).py__call__(arguments) instance, = super(_AbstractAnnotatedClass, self).py__call__(arguments)
return ContextSet(InstanceWrapper(instance)) return ContextSet([InstanceWrapper(instance)])
def get_given_types(self): def get_given_types(self):
raise NotImplementedError raise NotImplementedError
@@ -590,7 +590,7 @@ class LazyAnnotatedBaseClass(object):
def _remap_type_vars(self, base): def _remap_type_vars(self, base):
filter = self._class_context.get_type_var_filter() filter = self._class_context.get_type_var_filter()
for type_var_set in base.get_given_types(): for type_var_set in base.get_given_types():
new = ContextSet() new = NO_CONTEXTS
for type_var in type_var_set: for type_var in type_var_set:
if isinstance(type_var, TypeVar): if isinstance(type_var, TypeVar):
names = filter.get(type_var.py__name__()) names = filter.get(type_var.py__name__())
@@ -601,7 +601,7 @@ class LazyAnnotatedBaseClass(object):
# Mostly will be type vars, except if in some cases # Mostly will be type vars, except if in some cases
# a concrete type will already be there. In that # a concrete type will already be there. In that
# case just add it to the context set. # case just add it to the context set.
new |= ContextSet(type_var) new |= ContextSet([type_var])
yield new yield new

View File

@@ -272,7 +272,7 @@ def infer_param(execution_context, param):
from jedi.evaluate.context import FunctionExecutionContext from jedi.evaluate.context import FunctionExecutionContext
def eval_docstring(docstring): def eval_docstring(docstring):
return ContextSet.from_iterable( return ContextSet(
p p
for param_str in _search_param_in_docstr(docstring, param.name.value) for param_str in _search_param_in_docstr(docstring, param.name.value)
for p in _evaluate_for_statement_string(module_context, param_str) for p in _evaluate_for_statement_string(module_context, param_str)

View File

@@ -69,7 +69,7 @@ class AbstractTreeName(AbstractNameDefinition):
class ContextNameMixin(object): class ContextNameMixin(object):
def infer(self): def infer(self):
return ContextSet(self._context) return ContextSet([self._context])
def get_root_context(self): def get_root_context(self):
if self.parent_context is None: if self.parent_context is None:
@@ -384,9 +384,9 @@ class SpecialMethodFilter(DictFilter):
else: else:
continue continue
break break
return ContextSet( return ContextSet([
_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func) _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
) ])
def __init__(self, context, dct, builtin_context): def __init__(self, context, dct, builtin_context):
super(SpecialMethodFilter, self).__init__(dct) super(SpecialMethodFilter, self).__init__(dct)

View File

@@ -26,7 +26,7 @@ from jedi.evaluate.arguments import TreeArguments
from jedi.evaluate import helpers from jedi.evaluate import helpers
from jedi.evaluate.context import iterable from jedi.evaluate.context import iterable
from jedi.evaluate.filters import get_global_filters, TreeNameDefinition from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
from jedi.evaluate.base_context import ContextSet from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.parser_utils import is_scope, get_parent_scope from jedi.parser_utils import is_scope, get_parent_scope
@@ -60,7 +60,7 @@ class NameFinder(object):
node=self._name, node=self._name,
) )
if check is flow_analysis.UNREACHABLE: if check is flow_analysis.UNREACHABLE:
return ContextSet() return NO_CONTEXTS
return self._found_predefined_types return self._found_predefined_types
types = self._names_to_types(names, attribute_lookup) types = self._names_to_types(names, attribute_lookup)
@@ -266,7 +266,7 @@ def _check_isinstance_type(context, element, search_name):
except AssertionError: except AssertionError:
return None return None
context_set = ContextSet() context_set = NO_CONTEXTS
for cls_or_tup in lazy_context_cls.infer(): for cls_or_tup in lazy_context_cls.infer():
if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple': if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
for lazy_context in cls_or_tup.py__iter__(): for lazy_context in cls_or_tup.py__iter__():

View File

@@ -237,5 +237,5 @@ def execute_evaluated(context, *value_list):
# TODO move this out of here to the evaluator. # TODO move this out of here to the evaluator.
from jedi.evaluate.arguments import ValuesArguments from jedi.evaluate.arguments import ValuesArguments
from jedi.evaluate.base_context import ContextSet from jedi.evaluate.base_context import ContextSet
arguments = ValuesArguments([ContextSet(value) for value in value_list]) arguments = ValuesArguments([ContextSet([value]) for value in value_list])
return context.evaluator.execute(context, arguments) return context.evaluator.execute(context, arguments)

View File

@@ -93,7 +93,7 @@ def infer_import(context, tree_name, is_goto=False):
for t in types for t in types
) )
if not is_goto: if not is_goto:
types = ContextSet.from_set(types) types = ContextSet(types)
if not types: if not types:
path = import_path + [from_import_name] path = import_path + [from_import_name]
@@ -450,7 +450,7 @@ def import_module(evaluator, import_names, parent_module_context, sys_path):
safe_module_name=True, safe_module_name=True,
) )
return ContextSet(module) return ContextSet([module])
def _load_module(evaluator, path=None, code=None, sys_path=None, def _load_module(evaluator, path=None, code=None, sys_path=None,
@@ -528,7 +528,7 @@ def get_modules_containing_name(evaluator, modules, name):
sys_path=e_sys_path, sys_path=e_sys_path,
import_names=import_names, import_names=import_names,
) )
evaluator.module_cache.add(import_names, ContextSet(module)) evaluator.module_cache.add(import_names, ContextSet([module]))
return module return module
# skip non python modules # skip non python modules

View File

@@ -16,7 +16,7 @@ class AbstractLazyContext(object):
class LazyKnownContext(AbstractLazyContext): class LazyKnownContext(AbstractLazyContext):
"""data is a context.""" """data is a context."""
def infer(self): def infer(self):
return ContextSet(self.data) return ContextSet([self.data])
class LazyKnownContexts(AbstractLazyContext): class LazyKnownContexts(AbstractLazyContext):

View File

@@ -240,7 +240,7 @@ def infer_return_types(function_execution_context):
for from_, to in zip(unknown_type_vars, context.list_type_vars()) for from_, to in zip(unknown_type_vars, context.list_type_vars())
} }
return type_var_dict return type_var_dict
return ContextSet.from_iterable( return ContextSet(
define_type_vars( define_type_vars(
annotation_context, annotation_context,
remap_type_vars(annotation_context, type_var_dict), remap_type_vars(annotation_context, type_var_dict),

View File

@@ -51,7 +51,7 @@ def _limit_context_infers(func):
def _py__stop_iteration_returns(generators): def _py__stop_iteration_returns(generators):
results = ContextSet() results = NO_CONTEXTS
for generator in generators: for generator in generators:
try: try:
method = generator.py__stop_iteration_returns method = generator.py__stop_iteration_returns
@@ -71,7 +71,7 @@ def eval_node(context, element):
if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'): if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
return eval_atom(context, element) return eval_atom(context, element)
elif typ == 'lambdef': elif typ == 'lambdef':
return ContextSet(FunctionContext.from_context(context, element)) return ContextSet([FunctionContext.from_context(context, element)])
elif typ == 'expr_stmt': elif typ == 'expr_stmt':
return eval_expr_stmt(context, element) return eval_expr_stmt(context, element)
elif typ in ('power', 'atom_expr'): elif typ in ('power', 'atom_expr'):
@@ -100,12 +100,11 @@ def eval_node(context, element):
await_context_set = context_set.py__getattribute__(u"__await__") await_context_set = context_set.py__getattribute__(u"__await__")
if not await_context_set: if not await_context_set:
debug.warning('Tried to run py__await__ on context %s', context) debug.warning('Tried to run py__await__ on context %s', context)
context_set = ContextSet()
return _py__stop_iteration_returns(await_context_set.execute_evaluated()) return _py__stop_iteration_returns(await_context_set.execute_evaluated())
return context_set return context_set
elif typ in ('testlist_star_expr', 'testlist',): elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements. # The implicit tuple in statements.
return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element)) return ContextSet([iterable.SequenceLiteralContext(evaluator, context, element)])
elif typ in ('not_test', 'factor'): elif typ in ('not_test', 'factor'):
context_set = context.eval_node(element.children[-1]) context_set = context.eval_node(element.children[-1])
for operator in element.children[:-1]: for operator in element.children[:-1]:
@@ -122,7 +121,7 @@ def eval_node(context, element):
if element.value not in ('.', '...'): if element.value not in ('.', '...'):
origin = element.parent origin = element.parent
raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin)) raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis')) return ContextSet([compiled.builtin_from_name(evaluator, u'Ellipsis')])
elif typ == 'dotted_name': elif typ == 'dotted_name':
context_set = eval_atom(context, element.children[0]) context_set = eval_atom(context, element.children[0])
for next_name in element.children[2::2]: for next_name in element.children[2::2]:
@@ -158,7 +157,7 @@ def eval_trailer(context, base_contexts, trailer):
foo = set(base_contexts) foo = set(base_contexts)
# special case: PEP0484 typing module, see # special case: PEP0484 typing module, see
# https://github.com/davidhalter/jedi/issues/663 # https://github.com/davidhalter/jedi/issues/663
result = ContextSet() result = NO_CONTEXTS
for typ in list(foo): for typ in list(foo):
continue continue
if isinstance(typ, (ClassContext, TreeInstance)): if isinstance(typ, (ClassContext, TreeInstance)):
@@ -213,7 +212,7 @@ def eval_atom(context, atom):
elif atom.type == 'keyword': elif atom.type == 'keyword':
# For False/True/None # For False/True/None
if atom.value in ('False', 'True', 'None'): if atom.value in ('False', 'True', 'None'):
return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value)) return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)])
elif atom.value == 'print': elif atom.value == 'print':
# print e.g. could be evaluated like this in Python 2.7 # print e.g. could be evaluated like this in Python 2.7
return NO_CONTEXTS return NO_CONTEXTS
@@ -225,7 +224,7 @@ def eval_atom(context, atom):
elif isinstance(atom, tree.Literal): elif isinstance(atom, tree.Literal):
string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value) string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
return ContextSet(compiled.create_simple_object(context.evaluator, string)) return ContextSet([compiled.create_simple_object(context.evaluator, string)])
elif atom.type == 'strings': elif atom.type == 'strings':
# Will be multiple string. # Will be multiple string.
context_set = eval_atom(context, atom.children[0]) context_set = eval_atom(context, atom.children[0])
@@ -254,9 +253,9 @@ def eval_atom(context, atom):
pass pass
if comp_for.type == 'comp_for': if comp_for.type == 'comp_for':
return ContextSet(iterable.comprehension_from_atom( return ContextSet([iterable.comprehension_from_atom(
context.evaluator, context, atom context.evaluator, context, atom
)) )])
# It's a dict/list/tuple literal. # It's a dict/list/tuple literal.
array_node = c[1] array_node = c[1]
@@ -269,7 +268,7 @@ def eval_atom(context, atom):
context = iterable.DictLiteralContext(context.evaluator, context, atom) context = iterable.DictLiteralContext(context.evaluator, context, atom)
else: else:
context = iterable.SequenceLiteralContext(context.evaluator, context, atom) context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
return ContextSet(context) return ContextSet([context])
@_limit_context_infers @_limit_context_infers
@@ -397,7 +396,7 @@ def _literals_to_types(evaluator, result):
cls = compiled.builtin_from_name(evaluator, typ.name.string_name) cls = compiled.builtin_from_name(evaluator, typ.name.string_name)
new_result |= helpers.execute_evaluated(cls) new_result |= helpers.execute_evaluated(cls)
else: else:
new_result |= ContextSet(typ) new_result |= ContextSet([typ])
return new_result return new_result
@@ -459,26 +458,26 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
if str_operator == '*': if str_operator == '*':
# for iterables, ignore * operations # for iterables, ignore * operations
if isinstance(left, iterable.Sequence) or is_string(left): if isinstance(left, iterable.Sequence) or is_string(left):
return ContextSet(left) return ContextSet([left])
elif isinstance(right, iterable.Sequence) or is_string(right): elif isinstance(right, iterable.Sequence) or is_string(right):
return ContextSet(right) return ContextSet([right])
elif str_operator == '+': elif str_operator == '+':
if l_is_num and r_is_num or is_string(left) and is_string(right): if l_is_num and r_is_num or is_string(left) and is_string(right):
return ContextSet(left.execute_operation(right, str_operator)) return ContextSet([left.execute_operation(right, str_operator)])
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return ContextSet(iterable.MergedArray(evaluator, (left, right))) return ContextSet([iterable.MergedArray(evaluator, (left, right))])
elif str_operator == '-': elif str_operator == '-':
if l_is_num and r_is_num: if l_is_num and r_is_num:
return ContextSet(left.execute_operation(right, str_operator)) return ContextSet([left.execute_operation(right, str_operator)])
elif str_operator == '%': elif str_operator == '%':
# With strings and numbers the left type typically remains. Except for # With strings and numbers the left type typically remains. Except for
# `int() % float()`. # `int() % float()`.
return ContextSet(left) return ContextSet([left])
elif str_operator in COMPARISON_OPERATORS: elif str_operator in COMPARISON_OPERATORS:
if is_compiled(left) and is_compiled(right): if is_compiled(left) and is_compiled(right):
# Possible, because the return is not an option. Just compare. # Possible, because the return is not an option. Just compare.
try: try:
return ContextSet(left.execute_operation(right, str_operator)) return ContextSet([left.execute_operation(right, str_operator)])
except TypeError: except TypeError:
# Could be True or False. # Could be True or False.
pass pass
@@ -486,9 +485,9 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
if str_operator in ('is', '!=', '==', 'is not'): if str_operator in ('is', '!=', '==', 'is not'):
operation = COMPARISON_OPERATORS[str_operator] operation = COMPARISON_OPERATORS[str_operator]
bool_ = operation(left, right) bool_ = operation(left, right)
return ContextSet(_bool_to_context(evaluator, bool_)) return ContextSet([_bool_to_context(evaluator, bool_)])
return ContextSet(_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)) return ContextSet([_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)])
elif str_operator == 'in': elif str_operator == 'in':
return NO_CONTEXTS return NO_CONTEXTS
@@ -504,7 +503,7 @@ def _eval_comparison_part(evaluator, context, left, operator, right):
analysis.add(context, 'type-error-operation', operator, analysis.add(context, 'type-error-operation', operator,
message % (left, right)) message % (left, right))
result = ContextSet(left, right) result = ContextSet([left, right])
debug.dbg('Used operator %s resulting in %s', result) debug.dbg('Used operator %s resulting in %s', result)
return result return result
@@ -525,7 +524,7 @@ def _remove_statements(evaluator, context, stmt, name):
def tree_name_to_contexts(evaluator, context, tree_name): def tree_name_to_contexts(evaluator, context, tree_name):
context_set = ContextSet() context_set = NO_CONTEXTS
module_node = context.get_root_context().tree_node module_node = context.get_root_context().tree_node
if module_node is not None: if module_node is not None:
names = module_node.get_used_names().get(tree_name.value, []) names = module_node.get_used_names().get(tree_name.value, [])
@@ -611,7 +610,7 @@ def _apply_decorators(context, node):
) )
else: else:
decoratee_context = FunctionContext.from_context(context, node) decoratee_context = FunctionContext.from_context(context, node)
initial = values = ContextSet(decoratee_context) initial = values = ContextSet([decoratee_context])
for dec in reversed(node.get_decorators()): for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values) debug.dbg('decorator: %s %s', dec, values)
dec_values = context.eval_node(dec.children[1]) dec_values = context.eval_node(dec.children[1])
@@ -651,7 +650,7 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
# would allow this loop to run for a very long time if the # would allow this loop to run for a very long time if the
# index number is high. Therefore break if the loop is # index number is high. Therefore break if the loop is
# finished. # finished.
return ContextSet() return NO_CONTEXTS
context_set = lazy_context.infer() context_set = lazy_context.infer()
return context_set return context_set
@@ -662,7 +661,7 @@ def eval_subscript_list(evaluator, context, index):
""" """
if index == ':': if index == ':':
# Like array[:] # Like array[:]
return ContextSet(iterable.Slice(context, None, None, None)) return ContextSet([iterable.Slice(context, None, None, None)])
elif index.type == 'subscript' and not index.children[0] == '.': elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation, except for Python 2's # subscript basically implies a slice operation, except for Python 2's
@@ -680,9 +679,9 @@ def eval_subscript_list(evaluator, context, index):
result.append(el) result.append(el)
result += [None] * (3 - len(result)) result += [None] * (3 - len(result))
return ContextSet(iterable.Slice(context, *result)) return ContextSet([iterable.Slice(context, *result)])
elif index.type == 'subscriptlist': elif index.type == 'subscriptlist':
return ContextSet(iterable.SequenceLiteralContext(evaluator, context, index)) return ContextSet([iterable.SequenceLiteralContext(evaluator, context, index)])
# No slices # No slices
return context.eval_node(index) return context.eval_node(index)

View File

@@ -224,7 +224,7 @@ def builtins_reversed(sequences, obj, arguments):
# would fail in certain cases like `reversed(x).__iter__` if we # would fail in certain cases like `reversed(x).__iter__` if we
# just returned the result directly. # just returned the result directly.
instance = TreeInstance(obj.evaluator, obj.parent_context, obj, ValuesArguments([])) instance = TreeInstance(obj.evaluator, obj.parent_context, obj, ValuesArguments([]))
return ContextSet(ReversedObject(instance, list(reversed(ordered)))) return ContextSet([ReversedObject(instance, list(reversed(ordered)))])
@argument_clinic('obj, type, /', want_arguments=True, want_evaluator=True) @argument_clinic('obj, type, /', want_arguments=True, want_evaluator=True)
@@ -263,7 +263,7 @@ def builtins_isinstance(objects, types, arguments, evaluator):
'not %s.' % cls_or_tup 'not %s.' % cls_or_tup
analysis.add(lazy_context._context, 'type-error-isinstance', node, message) analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
return ContextSet.from_iterable( return ContextSet(
compiled.builtin_from_name(evaluator, force_unicode(str(b))) compiled.builtin_from_name(evaluator, force_unicode(str(b)))
for b in bool_results for b in bool_results
) )
@@ -328,7 +328,7 @@ def collections_namedtuple(obj, arguments):
code_lines=parso.split_lines(code, keepends=True), code_lines=parso.split_lines(code, keepends=True),
) )
return ContextSet(ClassContext(evaluator, parent_context, generated_class)) return ContextSet([ClassContext(evaluator, parent_context, generated_class)])
class PartialObject(object): class PartialObject(object):
@@ -367,7 +367,7 @@ class MergedPartialArguments(AbstractArguments):
def functools_partial(obj, arguments): def functools_partial(obj, arguments):
return ContextSet.from_iterable( return ContextSet(
PartialObject(instance, arguments) PartialObject(instance, arguments)
for instance in obj.py__call__(arguments) for instance in obj.py__call__(arguments)
) )
@@ -395,7 +395,7 @@ class ItemGetterCallable(object):
@repack_with_argument_clinic('item, /') @repack_with_argument_clinic('item, /')
def py__call__(self, item_context_set): def py__call__(self, item_context_set):
context_set = ContextSet() context_set = NO_CONTEXTS
for args_context in self._args_context_set: for args_context in self._args_context_set:
lazy_contexts = list(args_context.py__iter__()) lazy_contexts = list(args_context.py__iter__())
if len(lazy_contexts) == 1: if len(lazy_contexts) == 1:
@@ -411,7 +411,7 @@ class ItemGetterCallable(object):
def _operator_itemgetter(args_context_set, obj, arguments): def _operator_itemgetter(args_context_set, obj, arguments):
# final = obj.py__call__(arguments) # final = obj.py__call__(arguments)
# TODO use this as a context wrapper # TODO use this as a context wrapper
return ContextSet(ItemGetterCallable(obj.evaluator, args_context_set)) return ContextSet([ItemGetterCallable(obj.evaluator, args_context_set)])
_implemented = { _implemented = {

View File

@@ -164,7 +164,7 @@ class TypeshedPlugin(BasePlugin):
code_lines=[], code_lines=[],
) )
modules = _merge_modules(context_set, stub_module_context) modules = _merge_modules(context_set, stub_module_context)
return ContextSet.from_iterable(modules) return ContextSet(modules)
# If no stub is found, just return the default. # If no stub is found, just return the default.
return context_set return context_set
return wrapper return wrapper