Start using ContextualizedNode for py__iter__.

Dave Halter
2017-03-28 01:34:07 +02:00
parent bb9ea54402
commit 6a9c2f8795
12 changed files with 117 additions and 88 deletions
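In short (a simplified sketch of the idea; the full classes are added to jedi.evaluate.context in the diff below): a ContextualizedNode bundles a parse-tree node with the context it is evaluated in, so helpers such as iterable.py__iter__ no longer receive a bare node and can attach "not iterable" errors to the right context.

    # Sketch only -- simplified from the classes this commit adds.
    class ContextualizedNode(object):
        def __init__(self, context, node):
            self.context = context   # the evaluation context the node lives in
            self._node = node        # the parse-tree node itself

        def infer(self):
            return self.context.eval_node(self._node)

    # Call sites switch from passing (inferred types, bare node) to the pair:
    #   before:  iterable.py__iter__(evaluator, context.eval_node(node), node)
    #   after:   cn = ContextualizedNode(context, node)
    #            iterable.py__iter__(evaluator, cn.infer(), cn)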

View File

@@ -348,7 +348,7 @@ class Script(object):
                 types = context.eval_node(node)
                 for testlist in node.children[:-1:2]:
                     # Iterate tuples.
-                    unpack_tuple_to_dict(self._evaluator, types, testlist)
+                    unpack_tuple_to_dict(context, types, testlist)
             else:
                 try_iter_content(self._evaluator.goto_definitions(context, node))
         self._evaluator.reset_recursion_limitations()

View File

@@ -80,6 +80,7 @@ from jedi.evaluate import helpers
 from jedi.evaluate import pep0484
 from jedi.evaluate.filters import TreeNameDefinition, ParamName
 from jedi.evaluate.instance import AnonymousInstance, BoundMethod
+from jedi.evaluate.context import ContextualizedName, ContextualizedNode
 class Evaluator(object):
@@ -150,7 +151,8 @@ class Evaluator(object):
         types = self.eval_element(context, rhs)
         if seek_name:
-            types = finder.check_tuple_assignments(self, types, seek_name)
+            c_node = ContextualizedName(context, seek_name)
+            types = finder.check_tuple_assignments(self, c_node, types)
         first_operation = stmt.first_operation()
         if first_operation not in ('=', None) and first_operation.type == 'operator':
@@ -168,8 +170,8 @@
             # only in for loops without clutter, because they are
             # predictable. Also only do it, if the variable is not a tuple.
             node = for_stmt.get_input_node()
-            for_iterables = self.eval_element(context, node)
-            ordered = list(iterable.py__iter__(self, for_iterables, node))
+            cn = ContextualizedNode(context, node)
+            ordered = list(iterable.py__iter__(self, cn.infer(), cn))
             for lazy_context in ordered:
                 dct = {str(for_stmt.children[1]): lazy_context.infer()}
@@ -451,8 +453,10 @@
             return self.eval_statement(context, def_, name)
         elif def_.type == 'for_stmt':
             container_types = self.eval_element(context, def_.children[3])
-            for_types = iterable.py__iter__types(self, container_types, def_.children[3])
-            return finder.check_tuple_assignments(self, for_types, name)
+            cn = ContextualizedNode(context, def_.children[3])
+            for_types = iterable.py__iter__types(self, container_types, cn)
+            c_node = ContextualizedName(context, name)
+            return finder.check_tuple_assignments(self, c_node, for_types)
         elif def_.type in ('import_from', 'import_name'):
             return imports.infer_import(context, name)

View File

@@ -82,6 +82,7 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None):
     # TODO this path is probably not right
     module_context = node_context.get_root_context()
+    print(module_context, node)
     module_path = module_context.py__file__()
     instance = typ(error_name, module_path, node.start_pos, message)
     debug.warning(str(instance), format=False)

View File

@@ -1,5 +1,6 @@
 from jedi._compatibility import Python3Method
 from jedi.common import unite
+from jedi.parser.python.tree import ExprStmt, CompFor
 class Context(object):
@@ -14,9 +15,6 @@ class Context(object):
         self.evaluator = evaluator
         self.parent_context = parent_context
-    def get_parent_flow_context(self):
-        return self.parent_context
     def get_root_context(self):
         context = self
         while True:
@@ -76,12 +74,6 @@ class TreeContext(Context):
         return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
-class FlowContext(TreeContext):
-    def get_parent_flow_context(self):
-        if 1:
-            return self.parent_context
 class AbstractLazyContext(object):
     def __init__(self, data):
         self.data = data
@@ -141,3 +133,51 @@ class MergedLazyContexts(AbstractLazyContext):
     """data is a list of lazy contexts."""
     def infer(self):
         return unite(l.infer() for l in self.data)
+
+
+class ContextualizedNode(object):
+    def __init__(self, context, node):
+        self.context = context
+        self._node = node
+
+    def get_root_context(self):
+        return self.context.get_root_context()
+
+    def infer(self):
+        return self.context.eval_node(self._node)
+
+
+class ContextualizedName(ContextualizedNode):
+    # TODO merge with TreeNameDefinition?!
+    @property
+    def name(self):
+        return self._node
+
+    def assignment_indexes(self):
+        """
+        Returns an array of tuple(int, node) of the indexes that are used in
+        tuple assignments.
+
+        For example if the name is ``y`` in the following code::
+
+            x, (y, z) = 2, ''
+
+        would result in ``[(1, xyz_node), (0, yz_node)]``.
+        """
+        indexes = []
+        node = self._node.parent
+        compare = self._node
+        while node is not None:
+            if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
+                for i, child in enumerate(node.children):
+                    if child == compare:
+                        indexes.insert(0, (int(i / 2), node))
+                        break
+                else:
+                    raise LookupError("Couldn't find the assignment.")
+            elif isinstance(node, (ExprStmt, CompFor)):
+                break
+
+            compare = node
+            node = node.parent
+        return indexes
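To see what assignment_indexes() computes, here is a small self-contained toy version (plain nested tuples instead of parser nodes, returning bare indexes rather than (index, node) pairs; the real method walks upward from the name, the toy searches downward, but the index path comes out the same):

    # Toy illustration only -- no jedi parser involved.
    def toy_assignment_indexes(target, node, path=()):
        """Return the tuple indexes leading to `target`, outermost first."""
        for i, child in enumerate(node):
            if child == target:
                return list(path) + [i]
            if isinstance(child, tuple):
                found = toy_assignment_indexes(target, child, path + (i,))
                if found is not None:
                    return found
        return None

    # x, (y, z) = 2, ''  ->  `y` sits at position 1 of the outer tuple and
    # position 0 of the inner one, mirroring [(1, xyz_node), (0, yz_node)].
    assert toy_assignment_indexes('y', ('x', ('y', 'z'))) == [1, 0]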

View File

@@ -73,9 +73,6 @@ class ContextName(ContextNameMixin, AbstractTreeName):
 class TreeNameDefinition(AbstractTreeName):
-    def get_parent_flow_context(self):
-        return self.parent_context
     def infer(self):
         # Refactor this, should probably be here.
         from jedi.evaluate.finder import _name_to_types

View File

@@ -30,6 +30,7 @@ from jedi.evaluate import flow_analysis
 from jedi.evaluate import param
 from jedi.evaluate import helpers
 from jedi.evaluate.filters import get_global_filters
+from jedi.evaluate.context import ContextualizedName, ContextualizedNode
 class NameFinder(object):
@@ -183,9 +184,10 @@ def _name_to_types(evaluator, context, tree_name):
         try:
             types = context.predefined_names[node][tree_name.value]
         except KeyError:
-            container_types = context.eval_node(node.children[3])
-            for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
-            types = check_tuple_assignments(evaluator, for_types, tree_name)
+            cn = ContextualizedNode(context, node.children[3])
+            for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
+            c_node = ContextualizedName(context, tree_name)
+            types = check_tuple_assignments(evaluator, c_node, for_types)
     elif node.type == 'expr_stmt':
         types = _remove_statements(evaluator, context, node, tree_name)
     elif node.type == 'with_stmt':
@@ -360,13 +362,14 @@ def _check_isinstance_type(context, element, search_name):
     return result
-def check_tuple_assignments(evaluator, types, name):
+def check_tuple_assignments(evaluator, contextualized_name, types):
     """
     Checks if tuples are assigned.
     """
     lazy_context = None
-    for index, node in name.assignment_indexes():
-        iterated = iterable.py__iter__(evaluator, types, node)
+    for index, node in contextualized_name.assignment_indexes():
+        cn = ContextualizedNode(contextualized_name.context, node)
+        iterated = iterable.py__iter__(evaluator, types, cn)
         for _ in range(index + 1):
             try:
                 lazy_context = next(iterated)
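The reworked check_tuple_assignments() now needs a ContextualizedName rather than a bare tree name, so that every intermediate node of a nested tuple assignment can be wrapped in a ContextualizedNode before py__iter__ runs. The call shape, for orientation (a fragment, not standalone code):

    # old: check_tuple_assignments(evaluator, for_types, tree_name)
    # new:
    c_node = ContextualizedName(context, tree_name)
    types = check_tuple_assignments(evaluator, c_node, for_types)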

View File

@@ -297,7 +297,7 @@ class Importer(object):
                 method = parent_module.py__path__
             except AttributeError:
                 # The module is not a package.
-                _add_error(parent_module, import_path[-1])
+                _add_error(self.module_context, import_path[-1])
                 return set()
             else:
                 paths = method()
@@ -314,7 +314,7 @@ class Importer(object):
                 except ImportError:
                     module_path = None
                 if module_path is None:
-                    _add_error(parent_module, import_path[-1])
+                    _add_error(self.module_context, import_path[-1])
                     return set()
         else:
             parent_module = None

View File

@@ -240,23 +240,24 @@ class Comprehension(AbstractSequence):
         parent_context = parent_context or self._defining_context
         input_types = parent_context.eval_node(input_node)
-        iterated = py__iter__(evaluator, input_types, input_node)
+        cn = context.ContextualizedNode(parent_context, input_node)
+        iterated = py__iter__(evaluator, input_types, cn)
         exprlist = comp_for.children[1]
         for i, lazy_context in enumerate(iterated):
             types = lazy_context.infer()
-            dct = unpack_tuple_to_dict(evaluator, types, exprlist)
-            context = self._get_comp_for_context(
+            dct = unpack_tuple_to_dict(parent_context, types, exprlist)
+            context_ = self._get_comp_for_context(
                 parent_context,
                 comp_for,
             )
-            with helpers.predefine_names(context, comp_for, dct):
+            with helpers.predefine_names(context_, comp_for, dct):
                 try:
-                    for result in self._nested(comp_fors[1:], context):
+                    for result in self._nested(comp_fors[1:], context_):
                         yield result
                 except IndexError:
-                    iterated = context.eval_node(self._eval_node())
+                    iterated = context_.eval_node(self._eval_node())
                     if self.array_type == 'dict':
-                        yield iterated, context.eval_node(self._eval_node(2))
+                        yield iterated, context_.eval_node(self._eval_node(2))
                     else:
                         yield iterated
@@ -561,33 +562,33 @@ class MergedArray(_FakeArray):
         return sum(len(a) for a in self._arrays)
-def unpack_tuple_to_dict(evaluator, types, exprlist):
+def unpack_tuple_to_dict(context, types, exprlist):
     """
     Unpacking tuple assignments in for statements and expr_stmts.
     """
     if exprlist.type == 'name':
         return {exprlist.value: types}
     elif exprlist.type == 'atom' and exprlist.children[0] in '([':
-        return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
+        return unpack_tuple_to_dict(context, types, exprlist.children[1])
     elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                            'testlist_star_expr'):
         dct = {}
         parts = iter(exprlist.children[::2])
         n = 0
-        for lazy_context in py__iter__(evaluator, types, exprlist):
+        for lazy_context in py__iter__(context.evaluator, types, exprlist):
             n += 1
             try:
                 part = next(parts)
             except StopIteration:
                 # TODO this context is probably not right.
-                analysis.add(next(iter(types)), 'value-error-too-many-values', part,
+                analysis.add(context, 'value-error-too-many-values', part,
                              message="ValueError: too many values to unpack (expected %s)" % n)
             else:
-                dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
+                dct.update(unpack_tuple_to_dict(context, lazy_context.infer(), part))
         has_parts = next(parts, None)
         if types and has_parts is not None:
             # TODO this context is probably not right.
-            analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
+            analysis.add(context, 'value-error-too-few-values', has_parts,
                          message="ValueError: need more than %s values to unpack" % n)
         return dct
     elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
@@ -601,17 +602,19 @@ def unpack_tuple_to_dict(evaluator, types, exprlist):
     raise NotImplementedError
-def py__iter__(evaluator, types, node=None):
+def py__iter__(evaluator, types, contextualized_node=None):
     debug.dbg('py__iter__')
     type_iters = []
     for typ in types:
         try:
             iter_method = typ.py__iter__
         except AttributeError:
-            if node is not None:
-                # TODO this context is probably not right.
-                analysis.add(typ, 'type-error-not-iterable', node,
-                             message="TypeError: '%s' object is not iterable" % typ)
+            if contextualized_node is not None:
+                analysis.add(
+                    contextualized_node.context,
+                    'type-error-not-iterable',
+                    contextualized_node._node,
+                    message="TypeError: '%s' object is not iterable" % typ)
         else:
             type_iters.append(iter_method())
@@ -621,12 +624,15 @@ def py__iter__(evaluator, types, node=None):
     )
-def py__iter__types(evaluator, types, node=None):
+def py__iter__types(evaluator, types, contextualized_node=None):
     """
     Calls `py__iter__`, but ignores the ordering in the end and just returns
    all types that it contains.
     """
-    return unite(lazy_context.infer() for lazy_context in py__iter__(evaluator, types, node))
+    return unite(
+        lazy_context.infer()
+        for lazy_context in py__iter__(evaluator, types, contextualized_node)
+    )
 def py__getitem__(evaluator, context, types, trailer):
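The upshot of the new py__iter__ / py__iter__types signatures, sketched in comments (the example snippet and the node name are illustrative, not taken from the diff): when a non-iterable value shows up in a for loop or an unpacking, the warning is filed against the context the offending node appears in, rather than against the type object itself.

    # Analysing something like:
    #
    #     for x in 3:        # `3` has no py__iter__
    #         pass
    #
    # a caller now does roughly:
    #     cn = ContextualizedNode(context, three_node)   # three_node: hypothetical name
    #     iterable.py__iter__(evaluator, cn.infer(), cn)
    # and py__iter__ reports:
    #     analysis.add(cn.context, 'type-error-not-iterable', cn._node,
    #                  message="TypeError: '%s' object is not iterable" % typ)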

View File

@@ -61,6 +61,7 @@ from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
     ContextNameMixin
 from jedi.evaluate.dynamic import search_params
 from jedi.evaluate import context
+from jedi.evaluate.context import ContextualizedNode
 def apply_py__get__(context, base_context):
@@ -315,8 +316,8 @@ class FunctionExecutionContext(context.TreeContext):
     def _eval_yield(self, yield_expr):
         node = yield_expr.children[1]
         if node.type == 'yield_arg':  # It must be a yield from.
-            yield_from_types = self.eval_node(node.children[1])
-            for lazy_context in iterable.py__iter__(self.evaluator, yield_from_types, node):
+            cn = ContextualizedNode(self, node.children[1])
+            for lazy_context in iterable.py__iter__(self.evaluator, cn.infer(), cn):
                 yield lazy_context
         else:
             yield context.LazyTreeContext(self, node)
@@ -360,8 +361,8 @@ class FunctionExecutionContext(context.TreeContext):
                     yield result
             else:
                 input_node = for_stmt.get_input_node()
-                for_types = self.eval_node(input_node)
-                ordered = iterable.py__iter__(evaluator, for_types, input_node)
+                cn = ContextualizedNode(self, input_node)
+                ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
                 ordered = list(ordered)
                 for lazy_context in ordered:
                     dct = {str(for_stmt.children[1]): lazy_context.infer()}

View File

@@ -12,7 +12,6 @@ compiled module that returns the types for C-builtins.
 import collections
 import re
-from jedi._compatibility import unicode
 from jedi.common import unite
 from jedi.evaluate import compiled
 from jedi.evaluate import representation as er
@@ -24,6 +23,7 @@ from jedi import debug
 from jedi.evaluate import precedence
 from jedi.evaluate import param
 from jedi.evaluate import analysis
+from jedi.evaluate.context import LazyTreeContext, ContextualizedNode
 class NotInStdLib(LookupError):
@@ -176,7 +176,11 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
     # want static analysis to work well. Therefore we need to generated the
     # values again.
     key, lazy_context = next(arguments.unpack())
-    ordered = list(iterable.py__iter__(evaluator, sequences, lazy_context.data))
+    cn = None
+    if isinstance(lazy_context, LazyTreeContext):
+        # TODO access private
+        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
+    ordered = list(iterable.py__iter__(evaluator, sequences, cn))
     rev = list(reversed(ordered))
     # Repack iterator values and then run it the normal way. This is
@@ -215,11 +219,12 @@ def builtins_isinstance(evaluator, objects, types, arguments):
             bool_results.add(any(cls in mro for cls in classes))
         else:
             _, lazy_context = list(arguments.unpack())[1]
-            node = lazy_context.data
-            message = 'TypeError: isinstance() arg 2 must be a ' \
-                      'class, type, or tuple of classes and types, ' \
-                      'not %s.' % cls_or_tup
-            analysis.add(cls_or_tup, 'type-error-isinstance', node, message)
+            if isinstance(lazy_context, LazyTreeContext):
+                node = lazy_context.data
+                message = 'TypeError: isinstance() arg 2 must be a ' \
+                          'class, type, or tuple of classes and types, ' \
+                          'not %s.' % cls_or_tup
+                analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
     return set(compiled.create(evaluator, x) for x in bool_results)
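The isinstance() checks on LazyTreeContext above are presumably there because only tree-backed lazy contexts carry both a node (.data) and the context it came from; other lazy values have nothing to contextualize, so error reporting is simply skipped for them. The guard pattern, restated:

    # Only lazy contexts wrapping a parse-tree node can be contextualized.
    cn = None
    if isinstance(lazy_context, LazyTreeContext):
        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
    ordered = list(iterable.py__iter__(evaluator, sequences, cn))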

View File

@@ -10,6 +10,7 @@ from jedi.evaluate.cache import memoize_default
 from jedi import debug
 from jedi import common
 from jedi.evaluate.compiled import CompiledObject
+from jedi.evaluate.context import ContextualizedNode
 def get_venv_path(venv):
@@ -121,8 +122,8 @@ def _paths_from_assignment(module_context, expr_stmt):
     from jedi.evaluate.iterable import py__iter__
     from jedi.evaluate.precedence import is_string
-    types = module_context.create_context(expr_stmt).eval_node(expr_stmt)
-    for lazy_context in py__iter__(module_context.evaluator, types, expr_stmt):
+    cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
+    for lazy_context in py__iter__(module_context.evaluator, cn.infer(), cn):
         for context in lazy_context.infer():
             if is_string(context):
                 yield context.obj

View File

@@ -141,35 +141,6 @@ class PythonMixin():
                 break
         return scope
-    def assignment_indexes(self):
-        """
-        Returns an array of tuple(int, node) of the indexes that are used in
-        tuple assignments.
-
-        For example if the name is ``y`` in the following code::
-
-            x, (y, z) = 2, ''
-
-        would result in ``[(1, xyz_node), (0, yz_node)]``.
-        """
-        indexes = []
-        node = self.parent
-        compare = self
-        while node is not None:
-            if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
-                for i, child in enumerate(node.children):
-                    if child == compare:
-                        indexes.insert(0, (int(i / 2), node))
-                        break
-                else:
-                    raise LookupError("Couldn't find the assignment.")
-            elif isinstance(node, (ExprStmt, CompFor)):
-                break
-
-            compare = node
-            node = node.parent
-        return indexes
     def is_scope(self):
         # Default is not being a scope. Just inherit from Scope.
         return False