mirror of https://github.com/davidhalter/jedi.git
Improvements towards arrays / predefined names logic.
@@ -3,7 +3,6 @@ import sys
 import contextlib
 import functools
 import re
-from itertools import chain
 from ast import literal_eval

 from jedi._compatibility import unicode, reraise
@@ -184,7 +183,7 @@ def splitlines(string, keepends=False):


 def unite(iterable):
     """Turns a two dimensional array into a one dimensional."""
-    return set(chain.from_iterable(iterable))
+    return set(typ for types in iterable for typ in types)


 def to_list(func):
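
Note on the `unite()` change above: both versions flatten one nesting level of an iterable of iterables into a set; the new generator expression simply drops the dependency on `itertools.chain`. A minimal standalone check of that equivalence (plain Python, independent of jedi):

    from itertools import chain

    def unite_old(iterable):
        return set(chain.from_iterable(iterable))

    def unite_new(iterable):
        return set(typ for types in iterable for typ in types)

    groups = [{1, 2}, {2, 3}, {4}]
    assert unite_old(groups) == unite_new(groups) == {1, 2, 3, 4}
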
@@ -91,7 +91,6 @@ class Evaluator(object):
         self.compiled_cache = {} # see `evaluate.compiled.create()`
         self.mixed_cache = {} # see `evaluate.compiled.mixed.create()`
         self.analysis = []
-        self.predefined_if_name_dict_dict = {}
         self.dynamic_params_depth = 0
         self.is_analysis = False

@@ -165,17 +164,12 @@ class Evaluator(object):
                 for_iterables = self.eval_element(context, node)
                 ordered = list(iterable.py__iter__(self, for_iterables, node))

-                for index_types in ordered:
-                    dct = {str(for_stmt.children[1]): index_types}
-                    self.predefined_if_name_dict_dict[for_stmt] = dct
+                for lazy_context in ordered:
+                    dct = {str(for_stmt.children[1]): lazy_context.infer()}
+                    with helpers.predefine_names(context, for_stmt, dct):
                         t = self.eval_element(context, rhs)
                         left = precedence.calculate(self, left, operator, t)
                 types = left
-                if ordered:
-                    # If there are no for entries, we cannot iterate and the
-                    # types are defined by += entries. Therefore the for loop
-                    # is never called.
-                    del self.predefined_if_name_dict_dict[for_stmt]
             else:
                 types = precedence.calculate(self, left, operator, types)
         debug.dbg('eval_statement result %s', types)
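
For orientation, the block above is the evaluator's special case for augmented assignments inside a `for` loop: each inferred value of the loop variable is predefined while the right-hand side is re-evaluated. A hypothetical user snippet that exercises this path (illustrative only, not part of the repository):

    a = 3
    for x in [1, 1.0]:
        a += x  # jedi should infer both int and float for `a` here
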
@@ -183,12 +177,12 @@ class Evaluator(object):

     def eval_element(self, context, element):
         if_stmt = element.get_parent_until((tree.IfStmt, tree.ForStmt, tree.IsScope))
-        predefined_if_name_dict = self.predefined_if_name_dict_dict.get(if_stmt)
+        predefined_if_name_dict = context.predefined_names.get(if_stmt)
         if predefined_if_name_dict is None and isinstance(if_stmt, tree.IfStmt):
             if_stmt_test = if_stmt.children[1]
             name_dicts = [{}]
             # If we already did a check, we don't want to do it again -> If
-            # predefined_if_name_dict_dict is filled, we stop.
+            # context.predefined_names is filled, we stop.
             # We don't want to check the if stmt itself, it's just about
             # the content.
             if element.start_pos > if_stmt_test.end_pos:
@@ -227,11 +221,8 @@ class Evaluator(object):
             if len(name_dicts) > 1:
                 result = set()
                 for name_dict in name_dicts:
-                    self.predefined_if_name_dict_dict[if_stmt] = name_dict
-                    try:
+                    with helpers.predefine_names(context, if_stmt, name_dict):
                         result |= self._eval_element_not_cached(context, element)
-                    finally:
-                        del self.predefined_if_name_dict_dict[if_stmt]
                 return result
             else:
                 return self._eval_element_if_evaluated(context, element)
@@ -250,7 +241,7 @@ class Evaluator(object):
         parent = element
         while parent is not None:
             parent = parent.parent
-            predefined_if_name_dict = self.predefined_if_name_dict_dict.get(parent)
+            predefined_if_name_dict = context.predefined_names.get(parent)
             if predefined_if_name_dict is not None:
                 return self._eval_element_not_cached(context, element)
         return self._eval_element_cached(context, element)

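
The hunks above replace the manual "insert into predefined_if_name_dict_dict, evaluate, then delete in a finally block" dance with a single `with helpers.predefine_names(...)` statement. A context manager is equivalent to that try/finally pattern but keeps setup and cleanup together; a minimal generic sketch of the same idea (not jedi's code, names are illustrative):

    from contextlib import contextmanager

    @contextmanager
    def scoped_entry(dct, key, value):
        # Make `key` visible only while the with-block runs.
        dct[key] = value
        try:
            yield
        finally:
            del dct[key]

    predefined = {}
    with scoped_entry(predefined, 'if_stmt_1', {'x': {'int'}}):
        assert 'if_stmt_1' in predefined
    assert 'if_stmt_1' not in predefined
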
@@ -112,10 +112,10 @@ def _check_for_setattr(instance):

 def add_attribute_error(evaluator, scope, name):
     message = ('AttributeError: %s has no attribute %s.' % (scope, name))
-    from jedi.evaluate.representation import Instance
+    from jedi.evaluate.instance import AbstractInstanceContext
     # Check for __getattr__/__getattribute__ existance and issue a warning
     # instead of an error, if that happens.
-    if isinstance(scope, Instance):
+    if isinstance(scope, AbstractInstanceContext):
         typ = Warning
         try:
             scope.get_subscope_by_name('__getattr__')

@@ -38,14 +38,14 @@ class CheckAttribute(object):


 class CompiledObject(Context):
     # comply with the parser
     start_pos = 0, 0
     path = None # modules have this attribute - set it to None.
     used_names = {} # To be consistent with modules.

-    def __init__(self, evaluator, obj, parent_context=None):
+    def __init__(self, evaluator, obj, parent_context=None, faked_class=None):
         super(CompiledObject, self).__init__(evaluator, parent_context)
         self.obj = obj
+        # This attribute will not be set for most classes, except for fakes.
+        self.classdef = faked_class

     def get_root_node(self):
         # To make things a bit easier with filters we add this method here.
@@ -232,25 +232,6 @@ class CompiledObject(Context):
         for result in self.evaluator.execute(bltn_obj, params):
             yield result

-    @property
-    @underscore_memoization
-    def subscopes(self):
-        """
-        Returns only the faked scopes - the other ones are not important for
-        internal analysis.
-        """
-        raise NotImplementedError
-        module = self.get_parent_until()
-        faked_subscopes = []
-        for name in dir(self.obj):
-            try:
-                faked_subscopes.append(
-                    fake.get_faked(self.evaluator, module, self.obj, parent=self, name=name)
-                )
-            except fake.FakeDoesNotExist:
-                pass
-        return faked_subscopes
-
     def is_scope(self):
         return True

@@ -260,13 +241,6 @@ class CompiledObject(Context):
     def get_imports(self):
         return [] # Builtins don't have imports

-    @property
-    def classdef(self):
-        """
-        This is used to be able to work with compiled fakes.
-        """
-        return self
-

 class CompiledName(AbstractNameDefinition):
     def __init__(self, evaluator, parent_context, name):
@@ -534,7 +508,10 @@ def _parse_function_doc(doc):
 def _create_from_name(evaluator, module, compiled_object, name):
     obj = compiled_object.obj
     try:
-        return fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name)
+        faked = fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name)
+        if faked.type == 'funcdef':
+            from jedi.evaluate.representation import FunctionContext
+            return FunctionContext(evaluator, compiled_object, faked)
     except fake.FakeDoesNotExist:
         pass

@@ -607,6 +584,7 @@ def create(evaluator, obj, parent_context=None, module=None):
     A very weird interface class to this module. The more options provided the
     more acurate loading compiled objects is.
     """
+    faked = None
     if inspect.ismodule(obj):
         if parent_context is not None:
             # Modules don't have parents, be careful with caching: recurse.
@@ -616,8 +594,11 @@ def create(evaluator, obj, parent_context=None, module=None):
         return create(evaluator, obj, create(evaluator, _builtins))

     try:
-        return fake.get_faked(evaluator, module, obj, parent_context=parent_context)
+        faked = fake.get_faked(evaluator, module, obj, parent_context=parent_context)
+        if faked.type == 'funcdef':
+            from jedi.evaluate.representation import FunctionContext
+            return FunctionContext(evaluator, parent_context, faked)
     except fake.FakeDoesNotExist:
         pass

-    return CompiledObject(evaluator, obj, parent_context)
+    return CompiledObject(evaluator, obj, parent_context, faked)

@@ -118,7 +118,7 @@ def _faked(module, obj, name):
     # Having the module as a `parser.tree.Module`, we need to scan
     # for methods.
     if name is None:
-        if inspect.isbuiltin(obj):
+        if inspect.isbuiltin(obj) or inspect.isclass(obj):
             return _search_scope(faked_mod, obj.__name__), faked_mod
         elif not inspect.isclass(obj):
             # object is a method or descriptor
@@ -172,9 +172,11 @@ def memoize_faked(obj):
 def _get_faked(module, obj, name=None):
     obj = type(obj) if is_class_instance(obj) else obj
     result, fake_module = _faked(module, obj, name)
-    if result is None or result.type == 'classdef':
+    if result is None:
         # We're not interested in classes. What we want is functions.
         raise FakeDoesNotExist
+    elif result.type == 'classdef':
+        return result, fake_module
     else:
         # Set the docstr which was previously not set (faked modules don't
         # contain it).
@@ -192,8 +194,7 @@ def get_faked(evaluator, module, obj, name=None, parent_context=None):
     faked, fake_module = _get_faked(module and module.obj, obj, name)
     if module is not None:
         module.used_names = fake_module.used_names
-    from jedi.evaluate.representation import FunctionContext
-    return FunctionContext(evaluator, parent_context, faked)
+    return faked


 def is_class_instance(obj):
@@ -8,6 +8,7 @@ class Context(object):
     Most contexts are just instances of something, therefore make this the
     default to make subclassing a lot easier.
     """
+    predefined_names = {}

     def __init__(self, evaluator, parent_context=None):
         self.evaluator = evaluator
@@ -48,7 +49,11 @@ class Context(object):


 class TreeContext(Context):
-    pass
+    def __init__(self, evaluator, parent_context=None):
+        super(TreeContext, self).__init__(evaluator, parent_context)
+        self.predefined_names = {}


 class FlowContext(TreeContext):
     def get_parent_flow_context(self):
         if 1:
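
Why `Context.predefined_names = {}` at class level while `TreeContext.__init__` assigns `self.predefined_names = {}`: the class attribute is only a shared read-only default for contexts that never predefine anything, and tree contexts that actually mutate the mapping need their own dict. A small sketch of the pitfall this avoids (illustrative classes, not jedi's):

    class Base:
        predefined_names = {}  # shared default, fine as long as nobody mutates it

    class Tree(Base):
        def __init__(self):
            self.predefined_names = {}  # per-instance, safe to mutate

    a, b = Tree(), Tree()
    a.predefined_names['flow'] = {'x'}
    assert b.predefined_names == {}      # instances stay independent
    assert Base.predefined_names == {}   # the shared default is untouched
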
@@ -88,8 +93,8 @@ class LazyUnknownContext(AbstractLazyContext):

 class LazyTreeContext(AbstractLazyContext):
     def __init__(self, context, node):
+        super(LazyTreeContext, self).__init__(node)
         self._context = context
-        self._data = node

     def infer(self):
         return self._context.eval_node(self._data)
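
The `LazyTreeContext` change above stores only the context plus the raw tree node and defers actual inference to `infer()`. A stripped-down sketch of that lazy-wrapper pattern (generic Python, not jedi's API):

    class LazyValue:
        """Hold a thunk and evaluate it only when infer() is called."""
        def __init__(self, compute):
            self._compute = compute

        def infer(self):
            return self._compute()

    lazy = LazyValue(lambda: {int, float})
    # Nothing is evaluated until a caller actually needs the result:
    assert lazy.infer() == {int, float}
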
@@ -95,12 +95,16 @@ def filter_definition_names(names, origin, position=None):


 class NameFinder(object):
-    def __init__(self, evaluator, context, name_str, position=None):
+    def __init__(self, evaluator, context, name_or_str, position=None):
         self._evaluator = evaluator
         # Make sure that it's not just a syntax tree node.
-        self.context = context
-        self.name_str = name_str
-        self.position = position
+        self._context = context
+        self._name = name_or_str
+        if isinstance(name_or_str, tree.Name):
+            self._string_name = name_or_str.value
+        else:
+            self._string_name = name_or_str
+        self._position = position
         self._found_predefined_if_name = None

     @debug.increase_indent
@@ -118,30 +122,30 @@ class NameFinder(object):
         types = self._names_to_types(names, attribute_lookup)

         if not names and not types \
-                and not (isinstance(self.name_str, tree.Name) and
-                         isinstance(self.name_str.parent.parent, tree.Param)):
-            if not isinstance(self.name_str, (str, unicode)): # TODO Remove?
+                and not (isinstance(self._name, tree.Name) and
+                         isinstance(self._name.parent.parent, tree.Param)):
+            if isinstance(self._name, tree.Name):
                 if attribute_lookup:
                     analysis.add_attribute_error(self._evaluator,
-                                                 self.context, self.name_str)
+                                                 self._context, self._name)
                 else:
                     message = ("NameError: name '%s' is not defined."
-                               % self.name_str)
-                    analysis.add(self._evaluator, 'name-error', self.name_str,
+                               % self._string_name)
+                    analysis.add(self._evaluator, 'name-error', self._name,
                                  message)

         return types

     def get_filters(self, search_global=False):
-        if isinstance(self.name_str, tree.Name):
-            origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True)
+        if isinstance(self._name, tree.Name):
+            origin_scope = self._name.get_parent_until(tree.Scope, reverse=True)
         else:
             origin_scope = None

         if search_global:
-            return get_global_filters(self._evaluator, self.context, self.position, origin_scope)
+            return get_global_filters(self._evaluator, self._context, self._position, origin_scope)
         else:
-            return self.context.get_filters(search_global, self.position, origin_scope=origin_scope)
+            return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)

     def names_dict_lookup(self, names_dict, position):
         def get_param(scope, el):
@@ -149,15 +153,14 @@ class NameFinder(object):
                 return scope.param_by_name(str(el))
             return el

-        search_str = str(self.name_str)
        try:
-            names = names_dict[search_str]
+            names = names_dict[self._string_name]
            if not names: # We want names, otherwise stop.
                return []
        except KeyError:
            return []

-        names = filter_definition_names(names, self.name_str, position)
+        names = filter_definition_names(names, self._name, position)

        name_scope = None
        # Only the names defined in the last position are valid definitions.
@@ -166,7 +169,7 @@ class NameFinder(object):
            stmt = name.get_definition()
            name_scope = self._evaluator.wrap(stmt.get_parent_scope())

-            if isinstance(self.context, er.Instance) and not isinstance(name_scope, er.Instance):
+            if isinstance(self._context, er.Instance) and not isinstance(name_scope, er.Instance):
                # Instances should not be checked for positioning, because we
                # don't know in which order the functions are called.
                last_names.append(name)
@@ -190,27 +193,27 @@ class NameFinder(object):
                last_names.append(name)
                continue

-        if isinstance(self.name_str, tree.Name):
-            origin_scope = self.name_str.get_parent_until(tree.Scope, reverse=True)
-            scope = self.name_str
+        if isinstance(self._name, tree.Name):
+            origin_scope = self._name.get_parent_until(tree.Scope, reverse=True)
+            scope = self._name
            check = None
            while True:
                scope = scope.parent
                if scope.type in ("if_stmt", "for_stmt", "comp_for"):
                    try:
-                        name_dict = self._evaluator.predefined_if_name_dict_dict[scope]
-                        types = set(name_dict[str(self.name_str)])
+                        name_dict = self.context.predefined_names[scope]
+                        types = set(name_dict[self._string_name])
                    except KeyError:
                        continue
                    else:
-                        if self.name_str.start_pos < scope.children[1].end_pos:
+                        if self._name.start_pos < scope.children[1].end_pos:
                            # It doesn't make any sense to check if
                            # statements in the if statement itself, just
                            # deliver types.
                            self._found_predefined_if_name = types
                        else:
                            check = flow_analysis.reachability_check(
-                                self._context, self.context, origin_scope)
+                                self._context, self._context, origin_scope)
                            if check is flow_analysis.UNREACHABLE:
                                self._found_predefined_if_name = set()
                            else:
@@ -243,26 +246,26 @@ class NameFinder(object):
        `names_dicts`), until a name fits.
        """
        names = []
-        for filter in filters:
-            if self._evaluator.predefined_if_name_dict_dict:
-                node = self.name_str
+        if self._context.predefined_names:
+            # TODO is this ok? node might not always be a tree.Name
+            node = self._name
            while node is not None and not isinstance(node, tree.IsScope):
                node = node.parent
                if node.type in ("if_stmt", "for_stmt", "comp_for"):
                    try:
-                        name_dict = self._evaluator.predefined_if_name_dict_dict[node]
-                        types = set(name_dict[str(self.name_str)])
+                        name_dict = self._context.predefined_names[node]
+                        types = name_dict[self._string_name]
                    except KeyError:
                        continue
                    else:
                        self._found_predefined_if_name = types
                        return []
-            else:
-                names = filter.get(self.name_str)
+        for filter in filters:
+            names = filter.get(self._name)
            if names:
                break
-        debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self.name_str,
-                  self.context, names, self.position)
+        debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name,
+                  self._context, names, self._position)
        return list(self._clean_names(names))

    def _clean_names(self, names):
@@ -288,7 +291,7 @@ class NameFinder(object):
    def _check_getattr(self, inst):
        """Checks for both __getattr__ and __getattribute__ methods"""
        # str is important, because it shouldn't be `Name`!
-        name = compiled.create(self._evaluator, str(self.name_str))
+        name = compiled.create(self._evaluator, self._string_name)

        # This is a little bit special. `__getattribute__` is in Python
        # executed before `__getattr__`. But: I know no use case, where
@@ -305,9 +308,9 @@ class NameFinder(object):
        types = set()

        # Add isinstance and other if/assert knowledge.
-        #if isinstance(self.name_str, tree.Name):
+        #if isinstance(self._name, tree.Name):
            ## Ignore FunctionExecution parents for now.
-            #flow_scope = self.name_str
+            #flow_scope = self._name
            #until = flow_scope.get_parent_until(er.FunctionExecution)
            #while not isinstance(until, er.FunctionExecution):
                #flow_scope = flow_scope.get_parent_scope(include_flows=True)
@@ -315,22 +318,22 @@ class NameFinder(object):
                    #break
            ## TODO check if result is in scope -> no evaluation necessary
            #n = check_flow_information(self._evaluator, flow_scope,
-                                       #self.name_str, self.position)
+                                       #self._name, self._position)
            #if n:
                #return n

        for name in names:
            new_types = name.infer()
-            if isinstance(self.context, (er.ClassContext, AbstractInstanceContext)) \
+            if isinstance(self._context, (er.ClassContext, AbstractInstanceContext)) \
                    and attribute_lookup:
                types |= set(self._resolve_descriptors(name, new_types))
            else:
                types |= set(new_types)

        debug.dbg('finder._names_to_types: %s -> %s', names, types)
-        if not names and isinstance(self.context, AbstractInstanceContext):
+        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
-            return self._check_getattr(self.context)
+            return self._check_getattr(self._context)

        return types

@@ -347,7 +350,7 @@ class NameFinder(object):
            except AttributeError:
                result.add(r)
            else:
-                result |= desc_return(self.context)
+                result |= desc_return(self._context)
        return result


@@ -582,8 +585,9 @@ def _check_isinstance_type(evaluator, element, search_name):
    result = set()
    for cls_or_tup in evaluator.eval_element(classes):
        if isinstance(cls_or_tup, iterable.Array) and cls_or_tup.type == 'tuple':
-            for typ in unite(cls_or_tup.py__iter__()):
-                result |= evaluator.execute(typ)
+            for lazy_context in cls_or_tup.py__iter__():
+                for context in lazy_context.infer():
+                    result |= context.execute_evaluated()
        else:
            result |= evaluator.execute(cls_or_tup)
    return result

@@ -1,8 +1,8 @@
 import copy
-from itertools import chain
+from contextlib import contextmanager

 from jedi.parser import tree
 from jedi.common import unite


 def deep_ast_copy(obj, parent=None, new_elements=None):
@@ -116,9 +116,9 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
    transformed = tree.Node('power', new_power.children[start:])
    transformed.parent = new_power.parent
    return transformed
-    '''

    return new_power
+    '''


 def get_names_of_node(node):
@@ -163,3 +163,15 @@ class FakeName(tree.Name):
            return super(FakeName, self).is_definition()
        else:
            return self._is_definition
+
+
+@contextmanager
+def predefine_names(context, flow_scope, dct):
+    predefined = context.predefined_names
+    if flow_scope in predefined:
+        raise NotImplementedError('Why does this happen?')
+    predefined[flow_scope] = dct
+    try:
+        yield
+    finally:
+        del predefined[flow_scope]
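
Usage-wise, the new `predefine_names()` helper above is what the evaluator hunks earlier call via `with helpers.predefine_names(context, node, dct): ...`. A self-contained sketch of how such a helper behaves with a minimal stand-in context (the `FakeContext` class is purely illustrative and not part of jedi):

    from contextlib import contextmanager

    @contextmanager
    def predefine_names(context, flow_scope, dct):
        predefined = context.predefined_names
        predefined[flow_scope] = dct
        try:
            yield
        finally:
            del predefined[flow_scope]

    class FakeContext:
        def __init__(self):
            self.predefined_names = {}

    ctx = FakeContext()
    with predefine_names(ctx, 'for_stmt', {'i': {'int'}}):
        # While evaluating the loop body, `i` resolves to the predefined types.
        assert ctx.predefined_names['for_stmt']['i'] == {'int'}
    assert ctx.predefined_names == {}
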
@@ -17,6 +17,9 @@ class AbstractInstanceContext(Context):
    """
    This class is used to evaluate instances.
    """
+
+    _faked_class = None
+
    def __init__(self, evaluator, parent_context, class_context, var_args):
        super(AbstractInstanceContext, self).__init__(evaluator, parent_context)
        # Generated instances are classes that are just generated by self
@@ -98,7 +101,11 @@ class AbstractInstanceContext(Context):
        if include_self_names:
            for cls in self.class_context.py__mro__():
                if isinstance(cls, compiled.CompiledObject):
-                    yield CompiledSelfNameFilter(self.evaluator, self, cls, origin_scope)
+                    if cls.classdef is not None:
+                        # In this case we're talking about a fake object, it
+                        # doesn't make sense for normal compiled objects to
+                        # search for self variables.
+                        yield SelfNameFilter(self.evaluator, self, cls, origin_scope)
                else:
                    yield SelfNameFilter(self.evaluator, self, cls, origin_scope)

@@ -327,7 +334,7 @@ class SelfNameFilter(InstanceClassFilter):

    def _filter(self, names):
        names = self._filter_self_names(names)
-        if isinstance(self._parser_scope, compiled.CompiledObject):
+        if isinstance(self._parser_scope, compiled.CompiledObject) and False:
            # This would be for builtin skeletons, which are not yet supported.
            return list(names)
        else:
@@ -354,14 +361,6 @@ class SelfNameFilter(InstanceClassFilter):
        return names


-class CompiledSelfNameFilter(SelfNameFilter):
-    """
-    This filter is a bit special and exists only because of `compiled/fake/*`.
-    """
-    def _access_possible(self, name):
-        return True
-
-
 class ParamArguments(object):
    """
    TODO This seems like a strange class, clean up?

@@ -216,8 +216,8 @@ class Comprehension(AbstractSequence):
        exprlist = comp_for.children[1]
        for i, lazy_context in enumerate(iterated):
            types = lazy_context.infer()
-            evaluator.predefined_if_name_dict_dict[comp_for] = \
-                unpack_tuple_to_dict(evaluator, types, exprlist)
+            dct = unpack_tuple_to_dict(evaluator, types, exprlist)
+            with helpers.predefine_names(self._defining_context, comp_for, dct):
                try:
                    for result in self._nested(comp_fors[1:]):
                        yield result
@@ -227,8 +227,6 @@ class Comprehension(AbstractSequence):
                        yield iterated, self._defining_context.eval_node(self._eval_node(2))
                    else:
                        yield iterated
-                finally:
-                    del evaluator.predefined_if_name_dict_dict[comp_for]

    @memoize_default(default=[])
    @common.to_list
@@ -552,13 +550,12 @@ class MergedArray(_FakeArray):
        self._arrays = arrays

    def py__iter__(self):
-        raise NotImplementedError
        for array in self._arrays:
-            for types in array.py__iter__():
-                yield types
+            for lazy_context in array.py__iter__():
+                yield lazy_context

    def py__getitem__(self, index):
-        return unite(self.py__iter__())
+        return unite(lazy_context.infer() for lazy_context in self.py__iter__())

    def _items(self):
        for array in self._arrays:

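
The `MergedArray` change above makes `py__iter__` yield the wrapped lazy contexts of every underlying array and only calls `infer()` when `py__getitem__` needs concrete values. The same chain-then-force-on-demand shape, reduced to plain Python (names are illustrative):

    class Merged:
        def __init__(self, *iterables):
            self._iterables = iterables

        def __iter__(self):
            # Yield items lazily, one sub-iterable after the other.
            for iterable in self._iterables:
                for item in iterable:
                    yield item

        def all_values(self):
            # Force everything only when a flat result is requested.
            return set(self)

    merged = Merged({1, 2}, {2, 3})
    assert merged.all_values() == {1, 2, 3}
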
@@ -24,8 +24,8 @@ def try_iter_content(types, depth=0):
    except AttributeError:
        pass
    else:
-        for iter_types in f():
-            try_iter_content(iter_types, depth + 1)
+        for lazy_context in f():
+            try_iter_content(lazy_context.infer(), depth + 1)


 class AbstractArguments():
@@ -136,7 +136,7 @@ class TreeArguments(AbstractArguments):
            else: # Generator comprehension.
                # Include the brackets with the parent.
                comp = iterable.GeneratorComprehension(
-                    self._evaluator, self.argument_node.parent)
+                    self._evaluator, self.context, self.argument_node.parent)
                yield None, context.LazyKnownContext(comp)
        else:
            yield None, context.LazyTreeContext(self.context, el)

@@ -1,7 +1,7 @@
 """
 Handles operator precedence.
 """
-import operator
+import operator as op

 from jedi._compatibility import unicode
 from jedi.parser import tree
@@ -11,14 +11,14 @@ from jedi.evaluate import analysis

 # Maps Python syntax to the operator module.
 COMPARISON_OPERATORS = {
-    '==': operator.eq,
-    '!=': operator.ne,
-    'is': operator.is_,
-    'is not': operator.is_not,
-    '<': operator.lt,
-    '<=': operator.le,
-    '>': operator.gt,
-    '>=': operator.ge,
+    '==': op.eq,
+    '!=': op.ne,
+    'is': op.is_,
+    'is not': op.is_not,
+    '<': op.lt,
+    '<=': op.le,
+    '>': op.gt,
+    '>=': op.ge,
 }


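
The table above only renames the import (`operator` becomes `op`); the mapping itself keeps translating comparison tokens from the syntax tree into callables from the standard `operator` module. A tiny sketch of how such a dispatch table is used (generic, not jedi's evaluation logic):

    import operator as op

    COMPARISON_OPERATORS = {
        '==': op.eq,
        '!=': op.ne,
        '<': op.lt,
        '<=': op.le,
        '>': op.gt,
        '>=': op.ge,
    }

    def compare(left, token, right):
        # Look the token up and apply the corresponding callable.
        return COMPARISON_OPERATORS[token](left, right)

    assert compare(1, '<', 2) is True
    assert compare(3, '==', 3) is True
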
@@ -117,23 +117,23 @@ def is_literal(obj):

 def _is_tuple(obj):
     from jedi.evaluate import iterable
-    return isinstance(obj, iterable.AbstractSequence) and obj.type == 'tuple'
+    return isinstance(obj, iterable.AbstractSequence) and obj.array_type == 'tuple'


 def _is_list(obj):
     from jedi.evaluate import iterable
-    return isinstance(obj, iterable.AbstractSequence) and obj.type == 'list'
+    return isinstance(obj, iterable.AbstractSequence) and obj.array_type == 'list'


 def _element_calculate(evaluator, left, operator, right):
-    from jedi.evaluate import iterable, representation as er
+    from jedi.evaluate import iterable, instance
     l_is_num = _is_number(left)
     r_is_num = _is_number(right)
     if operator == '*':
         # for iterables, ignore * operations
         if isinstance(left, iterable.AbstractSequence) or is_string(left):
             return set([left])
-        elif isinstance(right, iterable.Array) or is_string(right):
+        elif isinstance(right, iterable.AbstractSequence) or is_string(right):
             return set([right])
     elif operator == '+':
         if l_is_num and r_is_num or is_string(left) and is_string(right):
@@ -166,7 +166,7 @@ def _element_calculate(evaluator, left, operator, right):

     def check(obj):
         """Checks if a Jedi object is either a float or an int."""
-        return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float')
+        return isinstance(obj, instance.CompiledInstance) and obj.name.name_string in ('int', 'float')

     # Static analysis, one is a number, the other one is not.
     if operator in ('+', '-') and l_is_num != r_is_num \

@@ -53,6 +53,7 @@ from jedi.evaluate import pep0484
 from jedi.evaluate import param
 from jedi.evaluate import flow_analysis
 from jedi.evaluate import imports
+from jedi.evaluate import helpers
 from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
     GlobalNameFilter, DictFilter, ContextName, AbstractNameDefinition, \
     ParamName, AnonymousInstanceParamName
@@ -409,13 +410,12 @@ class FunctionExecutionContext(Executed):
                input_node = for_stmt.get_input_node()
                for_types = self.eval_node(input_node)
                ordered = iterable.py__iter__(evaluator, for_types, input_node)
-                for index_types in ordered:
-                    dct = {str(for_stmt.children[1]): index_types}
-                    evaluator.predefined_if_name_dict_dict[for_stmt] = dct
+                for lazy_context in ordered:
+                    dct = {str(for_stmt.children[1]): lazy_context.infer()}
+                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._eval_yield(yield_in_same_for_stmt):
                                yield result
-                    del evaluator.predefined_if_name_dict_dict[for_stmt]

    def get_filters(self, search_global, until_position=None, origin_scope=None):
        yield self.function_execution_filter(self.evaluator, self, self.funcdef,

@@ -338,6 +338,8 @@ set(a)[0]
 #? int() str()
 list(set(a))[1]
+#? int() str()
+next(iter(set(a)))
 #? int() str()
 list(list(set(a)))[1]

 # does not yet work, because the recursion catching is not good enough (catches
 # to much)