Mirror of https://github.com/davidhalter/jedi.git
Start using ContextualizedNode for py__iter__.
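In short, the commit threads a ContextualizedNode (a tree node bundled with the context it was evaluated in) through py__iter__, py__iter__types, unpack_tuple_to_dict and check_tuple_assignments, so analysis errors can be reported against the right context instead of being guessed from the inferred types. As a rough standalone illustration of the new calling convention (FakeContext, FakeNode and the simplified py__iter__ below are mocks for this sketch, not jedi's API; only the shape of ContextualizedNode mirrors the class added in this commit):

# Minimal standalone sketch of the new calling convention (mocks, not jedi's API).

class FakeNode(object):
    def __init__(self, code):
        self.code = code


class FakeContext(object):
    def eval_node(self, node):
        # jedi would infer a set of types here; we fake a non-iterable result.
        return {42}


class ContextualizedNode(object):
    # Mirrors the class added in this commit: a node plus the context it lives in.
    def __init__(self, context, node):
        self.context = context
        self._node = node

    def infer(self):
        return self.context.eval_node(self._node)


def py__iter__(types, contextualized_node=None):
    # The old signature took a bare `node`; the new one takes a ContextualizedNode,
    # so the "not iterable" error can be attached to the right context.
    for typ in types:
        if not hasattr(typ, '__iter__') and contextualized_node is not None:
            print('type-error-not-iterable in %r at %r'
                  % (contextualized_node.context, contextualized_node._node.code))
    return iter(types)


# Old call sites looked like:  py__iter__(context.eval_node(node), node)
# New call sites look like:
context = FakeContext()
node = FakeNode('for x in 42: pass')
cn = ContextualizedNode(context, node)
list(py__iter__(cn.infer(), cn))

The hunks below apply exactly this pattern at each call site.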
@@ -348,7 +348,7 @@ class Script(object):
                 types = context.eval_node(node)
                 for testlist in node.children[:-1:2]:
                     # Iterate tuples.
-                    unpack_tuple_to_dict(self._evaluator, types, testlist)
+                    unpack_tuple_to_dict(context, types, testlist)
             else:
                 try_iter_content(self._evaluator.goto_definitions(context, node))
             self._evaluator.reset_recursion_limitations()
@@ -80,6 +80,7 @@ from jedi.evaluate import helpers
 from jedi.evaluate import pep0484
 from jedi.evaluate.filters import TreeNameDefinition, ParamName
 from jedi.evaluate.instance import AnonymousInstance, BoundMethod
+from jedi.evaluate.context import ContextualizedName, ContextualizedNode


 class Evaluator(object):
@@ -150,7 +151,8 @@ class Evaluator(object):
         types = self.eval_element(context, rhs)

         if seek_name:
-            types = finder.check_tuple_assignments(self, types, seek_name)
+            c_node = ContextualizedName(context, seek_name)
+            types = finder.check_tuple_assignments(self, c_node, types)

         first_operation = stmt.first_operation()
         if first_operation not in ('=', None) and first_operation.type == 'operator':
@@ -168,8 +170,8 @@ class Evaluator(object):
                 # only in for loops without clutter, because they are
                 # predictable. Also only do it, if the variable is not a tuple.
                 node = for_stmt.get_input_node()
-                for_iterables = self.eval_element(context, node)
-                ordered = list(iterable.py__iter__(self, for_iterables, node))
+                cn = ContextualizedNode(context, node)
+                ordered = list(iterable.py__iter__(self, cn.infer(), cn))

                 for lazy_context in ordered:
                     dct = {str(for_stmt.children[1]): lazy_context.infer()}
@@ -451,8 +453,10 @@ class Evaluator(object):
             return self.eval_statement(context, def_, name)
         elif def_.type == 'for_stmt':
             container_types = self.eval_element(context, def_.children[3])
-            for_types = iterable.py__iter__types(self, container_types, def_.children[3])
-            return finder.check_tuple_assignments(self, for_types, name)
+            cn = ContextualizedNode(context, def_.children[3])
+            for_types = iterable.py__iter__types(self, container_types, cn)
+            c_node = ContextualizedName(context, name)
+            return finder.check_tuple_assignments(self, c_node, for_types)
         elif def_.type in ('import_from', 'import_name'):
             return imports.infer_import(context, name)
@@ -82,6 +82,7 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None):

     # TODO this path is probably not right
     module_context = node_context.get_root_context()
+    print(module_context, node)
     module_path = module_context.py__file__()
     instance = typ(error_name, module_path, node.start_pos, message)
     debug.warning(str(instance), format=False)
@@ -1,5 +1,6 @@
 from jedi._compatibility import Python3Method
 from jedi.common import unite
+from jedi.parser.python.tree import ExprStmt, CompFor


 class Context(object):
@@ -14,9 +15,6 @@ class Context(object):
         self.evaluator = evaluator
         self.parent_context = parent_context

-    def get_parent_flow_context(self):
-        return self.parent_context
-
     def get_root_context(self):
         context = self
         while True:
@@ -76,12 +74,6 @@ class TreeContext(Context):
         return '<%s: %s>' % (self.__class__.__name__, self.tree_node)


-class FlowContext(TreeContext):
-    def get_parent_flow_context(self):
-        if 1:
-            return self.parent_context
-
-
 class AbstractLazyContext(object):
     def __init__(self, data):
         self.data = data
@@ -141,3 +133,51 @@ class MergedLazyContexts(AbstractLazyContext):
     """data is a list of lazy contexts."""
     def infer(self):
         return unite(l.infer() for l in self.data)
+
+
+class ContextualizedNode(object):
+    def __init__(self, context, node):
+        self.context = context
+        self._node = node
+
+    def get_root_context(self):
+        return self.context.get_root_context()
+
+    def infer(self):
+        return self.context.eval_node(self._node)
+
+
+class ContextualizedName(ContextualizedNode):
+    # TODO merge with TreeNameDefinition?!
+    @property
+    def name(self):
+        return self._node
+
+    def assignment_indexes(self):
+        """
+        Returns an array of tuple(int, node) of the indexes that are used in
+        tuple assignments.
+
+        For example if the name is ``y`` in the following code::
+
+            x, (y, z) = 2, ''
+
+        would result in ``[(1, xyz_node), (0, yz_node)]``.
+        """
+        indexes = []
+        node = self._node.parent
+        compare = self._node
+        while node is not None:
+            if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
+                for i, child in enumerate(node.children):
+                    if child == compare:
+                        indexes.insert(0, (int(i / 2), node))
+                        break
+                else:
+                    raise LookupError("Couldn't find the assignment.")
+            elif isinstance(node, (ExprStmt, CompFor)):
+                break
+
+            compare = node
+            node = node.parent
+        return indexes
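A note on the assignment_indexes() walk added above: a rough standalone illustration of what it computes for the docstring example x, (y, z) = 2, '', using hand-built mock nodes (MockNode and the 'expr_stmt' stop condition are stand-ins for jedi's parser tree, not its real API):

class MockNode(object):
    def __init__(self, type_, children=(), value=None):
        self.type = type_
        self.children = list(children)
        self.value = value
        self.parent = None
        for child in self.children:
            child.parent = self


def op(value):
    return MockNode('operator', value=value)


# x, (y, z) = 2, ''  -- names and commas interleaved, as in the parser tree.
x, y, z = (MockNode('name', value=n) for n in 'xyz')
yz_node = MockNode('testlist_comp', [y, op(','), z])
paren = MockNode('atom', [op('('), yz_node, op(')')])
xyz_node = MockNode('exprlist', [x, op(','), paren])
MockNode('expr_stmt', [xyz_node])  # sets xyz_node.parent


def assignment_indexes(name_node):
    # Same walk as ContextualizedName.assignment_indexes above, stopping on the
    # mock 'expr_stmt' type instead of isinstance(node, (ExprStmt, CompFor)).
    indexes = []
    compare = name_node
    node = name_node.parent
    while node is not None:
        if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
            for i, child in enumerate(node.children):
                if child is compare:
                    indexes.insert(0, (int(i / 2), node))
                    break
        elif node.type == 'expr_stmt':
            break
        compare = node
        node = node.parent
    return indexes


print([(i, n.type) for i, n in assignment_indexes(y)])
# -> [(1, 'exprlist'), (0, 'testlist_comp')], i.e. [(1, xyz_node), (0, yz_node)]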
@@ -73,9 +73,6 @@ class ContextName(ContextNameMixin, AbstractTreeName):


 class TreeNameDefinition(AbstractTreeName):
-    def get_parent_flow_context(self):
-        return self.parent_context
-
     def infer(self):
         # Refactor this, should probably be here.
         from jedi.evaluate.finder import _name_to_types
@@ -30,6 +30,7 @@ from jedi.evaluate import flow_analysis
 from jedi.evaluate import param
 from jedi.evaluate import helpers
 from jedi.evaluate.filters import get_global_filters
+from jedi.evaluate.context import ContextualizedName, ContextualizedNode


 class NameFinder(object):
@@ -183,9 +184,10 @@ def _name_to_types(evaluator, context, tree_name):
         try:
             types = context.predefined_names[node][tree_name.value]
         except KeyError:
-            container_types = context.eval_node(node.children[3])
-            for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
-            types = check_tuple_assignments(evaluator, for_types, tree_name)
+            cn = ContextualizedNode(context, node.children[3])
+            for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
+            c_node = ContextualizedName(context, tree_name)
+            types = check_tuple_assignments(evaluator, c_node, for_types)
     elif node.type == 'expr_stmt':
         types = _remove_statements(evaluator, context, node, tree_name)
     elif node.type == 'with_stmt':
@@ -360,13 +362,14 @@ def _check_isinstance_type(context, element, search_name):
     return result


-def check_tuple_assignments(evaluator, types, name):
+def check_tuple_assignments(evaluator, contextualized_name, types):
     """
     Checks if tuples are assigned.
     """
     lazy_context = None
-    for index, node in name.assignment_indexes():
-        iterated = iterable.py__iter__(evaluator, types, node)
+    for index, node in contextualized_name.assignment_indexes():
+        cn = ContextualizedNode(contextualized_name.context, node)
+        iterated = iterable.py__iter__(evaluator, types, cn)
         for _ in range(index + 1):
             try:
                 lazy_context = next(iterated)
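A note on the new check_tuple_assignments(evaluator, contextualized_name, types) above: the hunk shows only the head of the loop, but each (index, node) pair from assignment_indexes() drives one round of py__iter__ and then picks the element at that index. A rough standalone sketch of that selection, with plain tuples standing in for jedi's type sets and lazy contexts (not the real implementation):

def check_tuple_assignments(values, indexes):
    # For each nesting level, iterate the current values and keep the element
    # at the given index; jedi would call lazy_context.infer() on the result.
    current = values
    for index, _node in indexes:
        iterated = iter(current)
        picked = None
        for _ in range(index + 1):
            try:
                picked = next(iterated)
            except StopIteration:
                return None  # not enough values to unpack
        current = picked
    return current


# x, (y, z) = 2, ('a', 'b')  with indexes for `y`: [(1, xyz_node), (0, yz_node)]
rhs = (2, ('a', 'b'))  # stand-in for the inferred right-hand-side values
print(check_tuple_assignments(rhs, [(1, None), (0, None)]))  # -> 'a'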
@@ -297,7 +297,7 @@ class Importer(object):
                 method = parent_module.py__path__
             except AttributeError:
                 # The module is not a package.
-                _add_error(parent_module, import_path[-1])
+                _add_error(self.module_context, import_path[-1])
                 return set()
             else:
                 paths = method()
@@ -314,7 +314,7 @@ class Importer(object):
                 except ImportError:
                     module_path = None
                 if module_path is None:
-                    _add_error(parent_module, import_path[-1])
+                    _add_error(self.module_context, import_path[-1])
                     return set()
         else:
             parent_module = None
@@ -240,23 +240,24 @@ class Comprehension(AbstractSequence):
         parent_context = parent_context or self._defining_context
         input_types = parent_context.eval_node(input_node)

-        iterated = py__iter__(evaluator, input_types, input_node)
+        cn = context.ContextualizedNode(parent_context, input_node)
+        iterated = py__iter__(evaluator, input_types, cn)
         exprlist = comp_for.children[1]
         for i, lazy_context in enumerate(iterated):
             types = lazy_context.infer()
-            dct = unpack_tuple_to_dict(evaluator, types, exprlist)
-            context = self._get_comp_for_context(
+            dct = unpack_tuple_to_dict(parent_context, types, exprlist)
+            context_ = self._get_comp_for_context(
                 parent_context,
                 comp_for,
             )
-            with helpers.predefine_names(context, comp_for, dct):
+            with helpers.predefine_names(context_, comp_for, dct):
                 try:
-                    for result in self._nested(comp_fors[1:], context):
+                    for result in self._nested(comp_fors[1:], context_):
                         yield result
                 except IndexError:
-                    iterated = context.eval_node(self._eval_node())
+                    iterated = context_.eval_node(self._eval_node())
                     if self.array_type == 'dict':
-                        yield iterated, context.eval_node(self._eval_node(2))
+                        yield iterated, context_.eval_node(self._eval_node(2))
                     else:
                         yield iterated
@@ -561,33 +562,33 @@ class MergedArray(_FakeArray):
         return sum(len(a) for a in self._arrays)


-def unpack_tuple_to_dict(evaluator, types, exprlist):
+def unpack_tuple_to_dict(context, types, exprlist):
     """
     Unpacking tuple assignments in for statements and expr_stmts.
     """
     if exprlist.type == 'name':
         return {exprlist.value: types}
     elif exprlist.type == 'atom' and exprlist.children[0] in '([':
-        return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
+        return unpack_tuple_to_dict(context, types, exprlist.children[1])
     elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                            'testlist_star_expr'):
         dct = {}
         parts = iter(exprlist.children[::2])
         n = 0
-        for lazy_context in py__iter__(evaluator, types, exprlist):
+        for lazy_context in py__iter__(context.evaluator, types, exprlist):
             n += 1
             try:
                 part = next(parts)
             except StopIteration:
                 # TODO this context is probably not right.
-                analysis.add(next(iter(types)), 'value-error-too-many-values', part,
+                analysis.add(context, 'value-error-too-many-values', part,
                              message="ValueError: too many values to unpack (expected %s)" % n)
             else:
-                dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
+                dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part))
         has_parts = next(parts, None)
         if types and has_parts is not None:
             # TODO this context is probably not right.
-            analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
+            analysis.add(context, 'value-error-too-few-values', has_parts,
                          message="ValueError: need more than %s values to unpack" % n)
         return dct
     elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
@@ -601,17 +602,19 @@ def unpack_tuple_to_dict(evaluator, types, exprlist):
     raise NotImplementedError


-def py__iter__(evaluator, types, node=None):
+def py__iter__(evaluator, types, contextualized_node=None):
     debug.dbg('py__iter__')
     type_iters = []
     for typ in types:
         try:
             iter_method = typ.py__iter__
         except AttributeError:
-            if node is not None:
-                # TODO this context is probably not right.
-                analysis.add(typ, 'type-error-not-iterable', node,
-                             message="TypeError: '%s' object is not iterable" % typ)
+            if contextualized_node is not None:
+                analysis.add(
+                    contextualized_node.context,
+                    'type-error-not-iterable',
+                    contextualized_node._node,
+                    message="TypeError: '%s' object is not iterable" % typ)
         else:
             type_iters.append(iter_method())
@@ -621,12 +624,15 @@ def py__iter__(evaluator, types, node=None):
     )


-def py__iter__types(evaluator, types, node=None):
+def py__iter__types(evaluator, types, contextualized_node=None):
     """
     Calls `py__iter__`, but ignores the ordering in the end and just returns
     all types that it contains.
     """
-    return unite(lazy_context.infer() for lazy_context in py__iter__(evaluator, types, node))
+    return unite(
+        lazy_context.infer()
+        for lazy_context in py__iter__(evaluator, types, contextualized_node)
+    )


 def py__getitem__(evaluator, context, types, trailer):
@@ -61,6 +61,7 @@ from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
     ContextNameMixin
 from jedi.evaluate.dynamic import search_params
 from jedi.evaluate import context
+from jedi.evaluate.context import ContextualizedNode


 def apply_py__get__(context, base_context):
@@ -315,8 +316,8 @@ class FunctionExecutionContext(context.TreeContext):
     def _eval_yield(self, yield_expr):
         node = yield_expr.children[1]
         if node.type == 'yield_arg': # It must be a yield from.
-            yield_from_types = self.eval_node(node.children[1])
-            for lazy_context in iterable.py__iter__(self.evaluator, yield_from_types, node):
+            cn = ContextualizedNode(self, node.children[1])
+            for lazy_context in iterable.py__iter__(self.evaluator, cn.infer(), cn):
                 yield lazy_context
         else:
             yield context.LazyTreeContext(self, node)
@@ -360,8 +361,8 @@ class FunctionExecutionContext(context.TreeContext):
                         yield result
             else:
                 input_node = for_stmt.get_input_node()
-                for_types = self.eval_node(input_node)
-                ordered = iterable.py__iter__(evaluator, for_types, input_node)
+                cn = ContextualizedNode(self, input_node)
+                ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
                 ordered = list(ordered)
                 for lazy_context in ordered:
                     dct = {str(for_stmt.children[1]): lazy_context.infer()}
@@ -12,7 +12,6 @@ compiled module that returns the types for C-builtins.
 import collections
 import re

-from jedi._compatibility import unicode
 from jedi.common import unite
 from jedi.evaluate import compiled
 from jedi.evaluate import representation as er
@@ -24,6 +23,7 @@ from jedi import debug
 from jedi.evaluate import precedence
 from jedi.evaluate import param
 from jedi.evaluate import analysis
+from jedi.evaluate.context import LazyTreeContext, ContextualizedNode


 class NotInStdLib(LookupError):
@@ -176,7 +176,11 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
     # want static analysis to work well. Therefore we need to generated the
     # values again.
     key, lazy_context = next(arguments.unpack())
-    ordered = list(iterable.py__iter__(evaluator, sequences, lazy_context.data))
+    cn = None
+    if isinstance(lazy_context, LazyTreeContext):
+        # TODO access private
+        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
+    ordered = list(iterable.py__iter__(evaluator, sequences, cn))

     rev = list(reversed(ordered))
     # Repack iterator values and then run it the normal way. This is
@@ -215,11 +219,12 @@ def builtins_isinstance(evaluator, objects, types, arguments):
             bool_results.add(any(cls in mro for cls in classes))
         else:
             _, lazy_context = list(arguments.unpack())[1]
-            node = lazy_context.data
-            message = 'TypeError: isinstance() arg 2 must be a ' \
-                      'class, type, or tuple of classes and types, ' \
-                      'not %s.' % cls_or_tup
-            analysis.add(cls_or_tup, 'type-error-isinstance', node, message)
+            if isinstance(lazy_context, LazyTreeContext):
+                node = lazy_context.data
+                message = 'TypeError: isinstance() arg 2 must be a ' \
+                          'class, type, or tuple of classes and types, ' \
+                          'not %s.' % cls_or_tup
+                analysis.add(lazy_context._context, 'type-error-isinstance', node, message)

     return set(compiled.create(evaluator, x) for x in bool_results)
@@ -10,6 +10,7 @@ from jedi.evaluate.cache import memoize_default
 from jedi import debug
 from jedi import common
 from jedi.evaluate.compiled import CompiledObject
+from jedi.evaluate.context import ContextualizedNode


 def get_venv_path(venv):
@@ -121,8 +122,8 @@ def _paths_from_assignment(module_context, expr_stmt):

     from jedi.evaluate.iterable import py__iter__
     from jedi.evaluate.precedence import is_string
-    types = module_context.create_context(expr_stmt).eval_node(expr_stmt)
-    for lazy_context in py__iter__(module_context.evaluator, types, expr_stmt):
+    cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
+    for lazy_context in py__iter__(module_context.evaluator, cn.infer(), cn):
         for context in lazy_context.infer():
             if is_string(context):
                 yield context.obj
@@ -141,35 +141,6 @@ class PythonMixin():
                 break
         return scope

-    def assignment_indexes(self):
-        """
-        Returns an array of tuple(int, node) of the indexes that are used in
-        tuple assignments.
-
-        For example if the name is ``y`` in the following code::
-
-            x, (y, z) = 2, ''
-
-        would result in ``[(1, xyz_node), (0, yz_node)]``.
-        """
-        indexes = []
-        node = self.parent
-        compare = self
-        while node is not None:
-            if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
-                for i, child in enumerate(node.children):
-                    if child == compare:
-                        indexes.insert(0, (int(i / 2), node))
-                        break
-                else:
-                    raise LookupError("Couldn't find the assignment.")
-            elif isinstance(node, (ExprStmt, CompFor)):
-                break
-
-            compare = node
-            node = node.parent
-        return indexes
-
     def is_scope(self):
         # Default is not being a scope. Just inherit from Scope.
         return False