1
0
forked from VimPlug/jedi

eval_ -> infer_

This commit is contained in:
Dave Halter
2019-08-15 00:20:01 +02:00
parent 199799a966
commit 8157d119a7
23 changed files with 149 additions and 149 deletions

View File

@@ -403,7 +403,7 @@ class Script(object):
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.eval_node(node)
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)

View File

@@ -85,7 +85,7 @@ def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
contexts = context.eval_node(child_node)
contexts = context.infer_node(child_node)
if len(contexts) != 1:
return None
c, = contexts

View File

@@ -10,7 +10,7 @@ from parso.python import tree
from jedi._compatibility import u, Parameter
from jedi.inference.base_context import NO_CONTEXTS
from jedi.inference.syntax_tree import eval_atom
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_context_set
from jedi.cache import call_signature_time_cache
@@ -146,13 +146,13 @@ def infer_goto_definition(evaluator, context, leaf):
definitions = NO_CONTEXTS
if parent.type == 'atom':
# e.g. `(a + b)`
definitions = context.eval_node(leaf.parent)
definitions = context.infer_node(leaf.parent)
elif parent.type == 'trailer':
# e.g. `a()`
definitions = infer_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
# e.g. `"foo"` or `1.0`
return eval_atom(context, leaf)
return infer_atom(context, leaf)
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_context_set(evaluator)
return definitions

View File

@@ -14,30 +14,30 @@ Type inference of Python code in |jedi| is based on three assumptions:
The actual algorithm is based on a principle I call lazy type inference. That
said, the typical entry point for static analysis is calling
``eval_expr_stmt``. There's separate logic for autocompletion in the API, the
``infer_expr_stmt``. There's separate logic for autocompletion in the API, the
evaluator is all about inferring an expression.
TODO this paragraph is not what jedi does anymore, it's similar, but not the
same.
Now you need to understand what follows after ``eval_expr_stmt``. Let's
Now you need to understand what follows after ``infer_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will
about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.eval_node`` cares for resolving the dotted path
- ``Evaluator.infer_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.infer_node`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_node`` to find ``date``
- Now ``find_types`` is called again by ``infer_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
@@ -49,7 +49,7 @@ What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really
Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
@@ -80,8 +80,8 @@ from jedi.inference.base_context import ContextualizedName, ContextualizedNode,
from jedi.inference.context import ClassContext, FunctionContext, \
AnonymousInstance, BoundMethod
from jedi.inference.context.iterable import CompForContext
from jedi.inference.syntax_tree import eval_trailer, eval_expr_stmt, \
eval_node, check_tuple_assignments
from jedi.inference.syntax_tree import infer_trailer, infer_expr_stmt, \
infer_node, check_tuple_assignments
from jedi.plugins import plugin_manager
@@ -150,9 +150,9 @@ class Evaluator(object):
"""Convenience function"""
return self.project._get_sys_path(self, environment=self.environment, **kwargs)
def eval_element(self, context, element):
def infer_element(self, context, element):
if isinstance(context, CompForContext):
return eval_node(context, element)
return infer_node(context, element)
if_stmt = element
while if_stmt is not None:
@@ -211,31 +211,31 @@ class Evaluator(object):
result = NO_CONTEXTS
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= eval_node(context, element)
result |= infer_node(context, element)
return result
else:
return self._eval_element_if_inferred(context, element)
return self._infer_element_if_inferred(context, element)
else:
if predefined_if_name_dict:
return eval_node(context, element)
return infer_node(context, element)
else:
return self._eval_element_if_inferred(context, element)
return self._infer_element_if_inferred(context, element)
def _eval_element_if_inferred(self, context, element):
def _infer_element_if_inferred(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
TODO This function is temporary: Merge with infer_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return eval_node(context, element)
return self._eval_element_cached(context, element)
return infer_node(context, element)
return self._infer_element_cached(context, element)
@evaluator_function_cache(default=NO_CONTEXTS)
def _eval_element_cached(self, context, element):
return eval_node(context, element)
def _infer_element_cached(self, context, element):
return infer_node(context, element)
def goto_definitions(self, context, name):
def_ = name.get_definition(import_name_always=True)
@@ -252,9 +252,9 @@ class Evaluator(object):
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return eval_expr_stmt(context, def_, name)
return infer_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = context.eval_node(def_.children[3])
container_types = context.infer_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterate_contexts(container_types, cn)
c_node = ContextualizedName(context, name)
@@ -326,15 +326,15 @@ class Evaluator(object):
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
context_set = context.eval_node(trailer.children[1])
context_set = context.infer_node(trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_infer = trailer.parent.children[:i]
if to_infer[0] == 'await':
to_infer.pop(0)
context_set = context.eval_node(to_infer[0])
context_set = context.infer_node(to_infer[0])
for trailer in to_infer[1:]:
context_set = eval_trailer(context, context_set, trailer)
context_set = infer_trailer(context, context_set, trailer)
param_names = []
for context in context_set:
for signature in context.get_signatures():
@@ -347,7 +347,7 @@ class Evaluator(object):
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = context.eval_node(new_dotted)
values = context.infer_node(new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values

View File

@@ -167,7 +167,7 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None)
if node is None:
return True # An exception block that catches everything.
else:
except_classes = node_context.eval_node(node)
except_classes = node_context.infer_node(node)
for cls in except_classes:
from jedi.inference.context import iterable
if isinstance(cls, iterable.Sequence) and \

View File

@@ -132,7 +132,7 @@ def _parse_argument_clinic(string):
class _AbstractArgumentsMixin(object):
def eval_all(self, funcdef=None):
def infer_all(self, funcdef=None):
"""
Infers all arguments as a support for static analysis
(normally Jedi).
@@ -216,7 +216,7 @@ class TreeArguments(AbstractArguments):
named_args = []
for star_count, el in unpack_arglist(self.argument_node):
if star_count == 1:
arrays = self.context.eval_node(el)
arrays = self.context.infer_node(el)
iterators = [_iterate_star_args(self.context, a, el, funcdef)
for a in arrays]
for values in list(zip_longest(*iterators)):
@@ -226,7 +226,7 @@ class TreeArguments(AbstractArguments):
[v for v in values if v is not None]
)
elif star_count == 2:
arrays = self.context.eval_node(el)
arrays = self.context.infer_node(el)
for dct in arrays:
for key, values in _star_star_dict(self.context, dct, el, funcdef):
yield key, values

View File

@@ -77,8 +77,8 @@ class HelperContextMixin(object):
debug.warning('Tried to run __await__ on context %s', self)
return await_context_set.execute_with_values()
def eval_node(self, node):
return self.evaluator.eval_element(self, node)
def infer_node(self, node):
return self.evaluator.infer_element(self, node)
def create_context(self, node, node_is_context=False, node_is_object=False):
return self.evaluator.create_context(self, node, node_is_context, node_is_object)
@@ -286,7 +286,7 @@ class ContextualizedNode(object):
return self.context.get_root_context()
def infer(self):
return self.context.eval_node(self.node)
return self.context.infer_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)

View File

@@ -185,7 +185,7 @@ class FunctionExecutionContext(TreeContext):
def get_return_values(self, check_yields=False):
funcdef = self.tree_node
if funcdef.type == 'lambdef':
return self.eval_node(funcdef.children[-1])
return self.infer_node(funcdef.children[-1])
if check_yields:
context_set = NO_CONTEXTS
@@ -217,7 +217,7 @@ class FunctionExecutionContext(TreeContext):
ctx = compiled.builtin_from_name(self.evaluator, u'None')
context_set |= ContextSet([ctx])
else:
context_set |= self.eval_node(children[1])
context_set |= self.infer_node(children[1])
if check is flow_analysis.REACHABLE:
debug.dbg('Return reachable: %s', r)
break

View File

@@ -168,7 +168,7 @@ class ComprehensionMixin(object):
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.eval_node(input_node)
input_types = parent_context.infer_node(input_node)
# TODO: simulate await if self.is_async
cn = ContextualizedNode(parent_context, input_node)
@@ -186,9 +186,9 @@ class ComprehensionMixin(object):
for result in self._nested(comp_fors[1:], context_):
yield result
except IndexError:
iterated = context_.eval_node(self._entry_node)
iterated = context_.infer_node(self._entry_node)
if self.array_type == 'dict':
yield iterated, context_.eval_node(self._value_node)
yield iterated, context_.infer_node(self._value_node)
else:
yield iterated
@@ -357,14 +357,14 @@ class SequenceLiteralContext(Sequence):
if self.array_type == u'dict':
compiled_obj_index = compiled.create_simple_object(self.evaluator, index)
for key, value in self.get_tree_entries():
for k in self._defining_context.eval_node(key):
for k in self._defining_context.infer_node(key):
try:
method = k.execute_operation
except AttributeError:
pass
else:
if method(compiled_obj_index, u'==').get_safe_value():
return self._defining_context.eval_node(value)
return self._defining_context.infer_node(value)
raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)
if isinstance(index, slice):
@@ -372,7 +372,7 @@ class SequenceLiteralContext(Sequence):
else:
with reraise_getitem_errors(TypeError, KeyError, IndexError):
node = self.get_tree_entries()[index]
return self._defining_context.eval_node(node)
return self._defining_context.infer_node(node)
def py__iter__(self, contextualized_node=None):
"""
@@ -383,7 +383,7 @@ class SequenceLiteralContext(Sequence):
# Get keys.
types = NO_CONTEXTS
for k, _ in self.get_tree_entries():
types |= self._defining_context.eval_node(k)
types |= self._defining_context.infer_node(k)
# We don't know which dict index comes first, therefore always
# yield all the types.
for _ in types:
@@ -392,7 +392,7 @@ class SequenceLiteralContext(Sequence):
for node in self.get_tree_entries():
if node == ':' or node.type == 'subscript':
# TODO this should probably use at least part of the code
# of eval_subscript_list.
# of infer_subscript_list.
yield LazyKnownContext(Slice(self._defining_context, None, None, None))
else:
yield LazyTreeContext(self._defining_context, node)
@@ -405,7 +405,7 @@ class SequenceLiteralContext(Sequence):
def _dict_values(self):
return ContextSet.from_sets(
self._defining_context.eval_node(v)
self._defining_context.infer_node(v)
for k, v in self.get_tree_entries()
)
@@ -460,7 +460,7 @@ class SequenceLiteralContext(Sequence):
resolved (as a string) and the values are still lazy contexts.
"""
for key_node, value in self.get_tree_entries():
for key in self._defining_context.eval_node(key_node):
for key in self._defining_context.infer_node(key_node):
if is_string(key):
yield key.get_safe_value(), LazyTreeContext(self._defining_context, value)
@@ -495,7 +495,7 @@ class DictLiteralContext(_DictMixin, SequenceLiteralContext):
def _dict_keys(self):
return ContextSet.from_sets(
self._defining_context.eval_node(k)
self._defining_context.infer_node(k)
for k, v in self.get_tree_entries()
)
@@ -806,7 +806,7 @@ class Slice(object):
if element is None:
return None
result = self._context.eval_node(element)
result = self._context.infer_node(element)
if len(result) != 1:
# For simplicity, we want slices to be clear defined with just
# one type. Otherwise we will return an empty slice object.

View File

@@ -241,7 +241,7 @@ def _execute_types_in_stmt(module_context, stmt):
doesn't include tuple, list and dict literals, because the stuff they
contain is executed. (Used as type information).
"""
definitions = module_context.eval_node(stmt)
definitions = module_context.infer_node(stmt)
return ContextSet.from_sets(
_execute_array_values(module_context.evaluator, d)
for d in definitions
@@ -272,7 +272,7 @@ def infer_param(execution_context, param):
from jedi.inference.context.instance import InstanceArguments
from jedi.inference.context import FunctionExecutionContext
def eval_docstring(docstring):
def infer_docstring(docstring):
return ContextSet(
p
for param_str in _search_param_in_docstr(docstring, param.name.value)
@@ -283,12 +283,12 @@ def infer_param(execution_context, param):
if func.type == 'lambdef':
return NO_CONTEXTS
types = eval_docstring(execution_context.py__doc__())
types = infer_docstring(execution_context.py__doc__())
if isinstance(execution_context, FunctionExecutionContext) \
and isinstance(execution_context.var_args, InstanceArguments) \
and execution_context.function_context.py__name__() == '__init__':
class_context = execution_context.var_args.instance.class_context
types |= eval_docstring(class_context.py__doc__())
types |= infer_docstring(class_context.py__doc__())
debug.dbg('Found param types for docstring: %s', types, color='BLUE')
return types

View File

@@ -110,7 +110,7 @@ def _check_if(context, node):
if not allowed:
return UNSURE
types = context.eval_node(node)
types = context.infer_node(node)
values = set(x.py__bool__() for x in types)
if len(values) == 1:
return Status.lookup_table[values.pop()]

View File

@@ -21,7 +21,7 @@ from jedi import debug
from jedi import parser_utils
def eval_annotation(context, annotation):
def infer_annotation(context, annotation):
"""
Infers an annotation node. This means that it infers the part of
`int` here:
@@ -30,7 +30,7 @@ def eval_annotation(context, annotation):
Also checks for forward references (strings)
"""
context_set = context.eval_node(annotation)
context_set = context.infer_node(annotation)
if len(context_set) != 1:
debug.warning("Eval'ed typing index %s should lead to 1 object, "
" not %s" % (annotation, context_set))
@@ -40,7 +40,7 @@ def eval_annotation(context, annotation):
if is_string(evaled_context):
result = _get_forward_reference_node(context, evaled_context.get_safe_value())
if result is not None:
return context.eval_node(result)
return context.infer_node(result)
return context_set
@@ -49,7 +49,7 @@ def _infer_annotation_string(context, string, index=None):
if node is None:
return NO_CONTEXTS
context_set = context.eval_node(node)
context_set = context.infer_node(node)
if index is not None:
context_set = context_set.filter(
lambda context: context.array_type == u'tuple' # noqa
@@ -174,7 +174,7 @@ def _infer_param(execution_context, param):
)
# Annotations are like default params and resolve in the same way.
context = execution_context.function_context.get_default_param_context()
return eval_annotation(context, annotation)
return infer_annotation(context, annotation)
def py__annotations__(funcdef):
@@ -218,7 +218,7 @@ def infer_return_types(function_execution_context):
context = function_execution_context.function_context.get_default_param_context()
unknown_type_vars = list(find_unknown_type_vars(context, annotation))
annotation_contexts = eval_annotation(context, annotation)
annotation_contexts = infer_annotation(context, annotation)
if not unknown_type_vars:
return annotation_contexts.execute_annotation()
@@ -254,7 +254,7 @@ def infer_type_vars_for_execution(execution_context, annotation_dict):
annotation_variables = find_unknown_type_vars(context, annotation_node)
if annotation_variables:
# Infer unknown type var
annotation_context_set = context.eval_node(annotation_node)
annotation_context_set = context.infer_node(annotation_node)
star_count = executed_param._param_node.star_count
actual_context_set = executed_param.infer(use_hints=False)
if star_count == 1:
@@ -385,7 +385,7 @@ def find_unknown_type_vars(context, node):
for subscript_node in _unpack_subscriptlist(trailer.children[1]):
check_node(subscript_node)
else:
type_var_set = context.eval_node(node)
type_var_set = context.infer_node(node)
for type_var in type_var_set:
if isinstance(type_var, TypeVar) and type_var not in found:
found.append(type_var)

View File

@@ -235,7 +235,7 @@ def _iter_over_arguments(maybe_tuple_context, defining_context):
from jedi.inference.gradual.annotation import _get_forward_reference_node
node = _get_forward_reference_node(defining_context, context.get_safe_value())
if node is not None:
for c in defining_context.eval_node(node):
for c in defining_context.infer_node(node):
yield c
else:
yield context

View File

@@ -73,8 +73,8 @@ def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
# we should not match anything more than x.
if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
if trailer.type == 'atom':
return context.eval_node(trailer)
return context.eval_node(leaf)
return context.infer_node(trailer)
return context.infer_node(leaf)
power = trailer.parent
index = power.children.index(trailer)
@@ -99,10 +99,10 @@ def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
base = trailers[0]
trailers = trailers[1:]
values = context.eval_node(base)
from jedi.inference.syntax_tree import eval_trailer
values = context.infer_node(base)
from jedi.inference.syntax_tree import infer_trailer
for trailer in trailers:
values = eval_trailer(context, values, trailer)
values = infer_trailer(context, values, trailer)
return values

View File

@@ -43,7 +43,7 @@ class LazyTreeContext(AbstractLazyContext):
def infer(self):
with monkeypatch(self.context, 'predefined_names', self._predefined_names):
return self.context.eval_node(self.data)
return self.context.infer_node(self.data)
def get_merged_lazy_context(lazy_contexts):

View File

@@ -241,7 +241,7 @@ class ParamName(BaseTreeParamName):
node = self.annotation_node
if node is None:
return NO_CONTEXTS
contexts = self.parent_context.parent_context.eval_node(node)
contexts = self.parent_context.parent_context.infer_node(node)
if execute_annotation:
contexts = contexts.execute_annotation()
return contexts
@@ -250,7 +250,7 @@ class ParamName(BaseTreeParamName):
node = self.default_node
if node is None:
return NO_CONTEXTS
return self.parent_context.parent_context.eval_node(node)
return self.parent_context.parent_context.infer_node(node)
@property
def default_node(self):

View File

@@ -62,16 +62,16 @@ def _goes_to_param_name(param_name, context, potential_name):
def _to_callables(context, trailer):
from jedi.inference.syntax_tree import eval_trailer
from jedi.inference.syntax_tree import infer_trailer
atom_expr = trailer.parent
index = atom_expr.children[0] == 'await'
# Eval atom first
contexts = context.eval_node(atom_expr.children[index])
contexts = context.infer_node(atom_expr.children[index])
for trailer2 in atom_expr.children[index + 1:]:
if trailer == trailer2:
break
contexts = eval_trailer(context, contexts, trailer2)
contexts = infer_trailer(context, contexts, trailer2)
return contexts

View File

@@ -68,16 +68,16 @@ def _py__stop_iteration_returns(generators):
@debug.increase_indent
@_limit_context_infers
def eval_node(context, element):
debug.dbg('eval_node %s@%s in %s', element, element.start_pos, context)
def infer_node(context, element):
debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
evaluator = context.evaluator
typ = element.type
if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
return eval_atom(context, element)
return infer_atom(context, element)
elif typ == 'lambdef':
return ContextSet([FunctionContext.from_context(context, element)])
elif typ == 'expr_stmt':
return eval_expr_stmt(context, element)
return infer_expr_stmt(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
children = element.children[1:]
@@ -86,11 +86,11 @@ def eval_node(context, element):
had_await = True
first_child = children.pop(0)
context_set = context.eval_node(first_child)
context_set = context.infer_node(first_child)
for (i, trailer) in enumerate(children):
if trailer == '**': # has a power operation.
right = context.eval_node(children[i + 1])
context_set = _eval_comparison(
right = context.infer_node(children[i + 1])
context_set = _infer_comparison(
evaluator,
context,
context_set,
@@ -98,7 +98,7 @@ def eval_node(context, element):
right
)
break
context_set = eval_trailer(context, context_set, trailer)
context_set = infer_trailer(context, context_set, trailer)
if had_await:
return context_set.py__await__().py__stop_iteration_returns()
@@ -107,14 +107,14 @@ def eval_node(context, element):
# The implicit tuple in statements.
return ContextSet([iterable.SequenceLiteralContext(evaluator, context, element)])
elif typ in ('not_test', 'factor'):
context_set = context.eval_node(element.children[-1])
context_set = context.infer_node(element.children[-1])
for operator in element.children[:-1]:
context_set = eval_factor(context_set, operator)
context_set = infer_factor(context_set, operator)
return context_set
elif typ == 'test':
# `x if foo else y` case.
return (context.eval_node(element.children[0]) |
context.eval_node(element.children[-1]))
return (context.infer_node(element.children[0]) |
context.infer_node(element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not inferred.
# In Python 2 ellipsis is coded as three single dot tokens, not
@@ -124,33 +124,33 @@ def eval_node(context, element):
raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
return ContextSet([compiled.builtin_from_name(evaluator, u'Ellipsis')])
elif typ == 'dotted_name':
context_set = eval_atom(context, element.children[0])
context_set = infer_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
context_set = context_set.py__getattribute__(next_name, name_context=context)
return context_set
elif typ == 'eval_input':
return eval_node(context, element.children[0])
return infer_node(context, element.children[0])
elif typ == 'annassign':
return annotation.eval_annotation(context, element.children[1]) \
return annotation.infer_annotation(context, element.children[1]) \
.execute_annotation()
elif typ == 'yield_expr':
if len(element.children) and element.children[1].type == 'yield_arg':
# Implies that it's a yield from.
element = element.children[1].children[1]
generators = context.eval_node(element) \
generators = context.infer_node(element) \
.py__getattribute__('__iter__').execute_with_values()
return generators.py__stop_iteration_returns()
# Generator.send() is not implemented.
return NO_CONTEXTS
elif typ == 'namedexpr_test':
return eval_node(context, element.children[2])
return infer_node(context, element.children[2])
else:
return eval_or_test(context, element)
return infer_or_test(context, element)
def eval_trailer(context, atom_contexts, trailer):
def infer_trailer(context, atom_contexts, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = None
@@ -158,11 +158,11 @@ def eval_trailer(context, atom_contexts, trailer):
if trailer_op == '[':
trailer_op, node, _ = trailer.children
return atom_contexts.get_item(
eval_subscript_list(context.evaluator, context, node),
infer_subscript_list(context.evaluator, context, node),
ContextualizedNode(context, trailer)
)
else:
debug.dbg('eval_trailer: %s in %s', trailer, atom_contexts)
debug.dbg('infer_trailer: %s in %s', trailer, atom_contexts)
if trailer_op == '.':
return atom_contexts.py__getattribute__(
name_context=context,
@@ -174,7 +174,7 @@ def eval_trailer(context, atom_contexts, trailer):
return atom_contexts.execute(args)
def eval_atom(context, atom):
def infer_atom(context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
@@ -222,10 +222,10 @@ def eval_atom(context, atom):
return ContextSet([compiled.create_simple_object(context.evaluator, string)])
elif atom.type == 'strings':
# Will be multiple string.
context_set = eval_atom(context, atom.children[0])
context_set = infer_atom(context, atom.children[0])
for string in atom.children[1:]:
right = eval_atom(context, string)
context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right)
right = infer_atom(context, string)
context_set = _infer_comparison(context.evaluator, context, context_set, u'+', right)
return context_set
elif atom.type == 'fstring':
return compiled.get_string_context_set(context.evaluator)
@@ -235,7 +235,7 @@ def eval_atom(context, atom):
if c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return context.eval_node(c[1])
return context.infer_node(c[1])
try:
comp_for = c[1].children[1]
@@ -269,7 +269,7 @@ def eval_atom(context, atom):
@_limit_context_infers
def eval_expr_stmt(context, stmt, seek_name=None):
def infer_expr_stmt(context, stmt, seek_name=None):
with recursion.execution_allowed(context.evaluator, stmt) as allowed:
# Here we allow list/set to recurse under certain conditions. To make
# it possible to resolve stuff like list(set(list(x))), this is
@@ -286,12 +286,12 @@ def eval_expr_stmt(context, stmt, seek_name=None):
allowed = True
if allowed:
return _eval_expr_stmt(context, stmt, seek_name)
return _infer_expr_stmt(context, stmt, seek_name)
return NO_CONTEXTS
@debug.increase_indent
def _eval_expr_stmt(context, stmt, seek_name=None):
def _infer_expr_stmt(context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
@@ -300,9 +300,9 @@ def _eval_expr_stmt(context, stmt, seek_name=None):
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
context_set = context.eval_node(rhs)
context_set = context.infer_node(rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
@@ -330,18 +330,18 @@ def _eval_expr_stmt(context, stmt, seek_name=None):
for lazy_context in ordered:
dct = {for_stmt.children[1].value: lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = context.eval_node(rhs)
left = _eval_comparison(context.evaluator, context, left, operator, t)
t = context.infer_node(rhs)
left = _infer_comparison(context.evaluator, context, left, operator, t)
context_set = left
else:
context_set = _eval_comparison(context.evaluator, context, left, operator, context_set)
debug.dbg('eval_expr_stmt result %s', context_set)
context_set = _infer_comparison(context.evaluator, context, left, operator, context_set)
debug.dbg('infer_expr_stmt result %s', context_set)
return context_set
def eval_or_test(context, or_test):
def infer_or_test(context, or_test):
iterator = iter(or_test.children)
types = context.eval_node(next(iterator))
types = context.infer_node(next(iterator))
for operator in iterator:
right = next(iterator)
if operator.type == 'comp_op': # not in / is not
@@ -352,20 +352,20 @@ def eval_or_test(context, or_test):
left_bools = set(left.py__bool__() for left in types)
if left_bools == {True}:
if operator == 'and':
types = context.eval_node(right)
types = context.infer_node(right)
elif left_bools == {False}:
if operator != 'and':
types = context.eval_node(right)
types = context.infer_node(right)
# Otherwise continue, because of uncertainty.
else:
types = _eval_comparison(context.evaluator, context, types, operator,
context.eval_node(right))
debug.dbg('eval_or_test types %s', types)
types = _infer_comparison(context.evaluator, context, types, operator,
context.infer_node(right))
debug.dbg('infer_or_test types %s', types)
return types
@iterator_to_context_set
def eval_factor(context_set, operator):
def infer_factor(context_set, operator):
"""
Calculates `+`, `-`, `~` and `not` prefixes.
"""
@@ -397,7 +397,7 @@ def _literals_to_types(evaluator, result):
return new_result
def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
def _infer_comparison(evaluator, context, left_contexts, operator, right_contexts):
if not left_contexts or not right_contexts:
# illegal slices e.g. cause left/right_result to be None
result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
@@ -410,7 +410,7 @@ def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts
return _literals_to_types(evaluator, left_contexts | right_contexts)
else:
return ContextSet.from_sets(
_eval_comparison_part(evaluator, context, left, operator, right)
_infer_comparison_part(evaluator, context, left, operator, right)
for left in left_contexts
for right in right_contexts
)
@@ -461,7 +461,7 @@ def _get_tuple_ints(context):
return numbers
def _eval_comparison_part(evaluator, context, left, operator, right):
def _infer_comparison_part(evaluator, context, left, operator, right):
l_is_num = is_number(left)
r_is_num = is_number(right)
if isinstance(operator, unicode):
@@ -543,7 +543,7 @@ def _remove_statements(evaluator, context, stmt, name):
if pep0484_contexts:
return pep0484_contexts
return eval_expr_stmt(context, stmt, seek_name=name)
return infer_expr_stmt(context, stmt, seek_name=name)
@plugin_manager.decorate()
@@ -559,7 +559,7 @@ def tree_name_to_contexts(evaluator, context, tree_name):
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
if correct_scope:
context_set |= annotation.eval_annotation(
context_set |= annotation.infer_annotation(
context, expr_stmt.children[1].children[1]
).execute_annotation()
if context_set:
@@ -579,7 +579,7 @@ def tree_name_to_contexts(evaluator, context, tree_name):
return finder.find(filters, attribute_lookup=False)
elif node.type not in ('import_from', 'import_name'):
context = evaluator.create_context(context, tree_name)
return eval_atom(context, tree_name)
return infer_atom(context, tree_name)
typ = node.type
if typ == 'for_stmt':
@@ -606,7 +606,7 @@ def tree_name_to_contexts(evaluator, context, tree_name):
elif typ == 'expr_stmt':
types = _remove_statements(evaluator, context, node, tree_name)
elif typ == 'with_stmt':
context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
context_managers = context.infer_node(node.get_test_node_from_name(tree_name))
enter_methods = context_managers.py__getattribute__(u'__enter__')
return enter_methods.execute_with_values()
elif typ in ('import_from', 'import_name'):
@@ -617,7 +617,7 @@ def tree_name_to_contexts(evaluator, context, tree_name):
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
types = exceptions.execute_with_values()
elif node.type == 'param':
types = NO_CONTEXTS
@@ -646,13 +646,13 @@ def _apply_decorators(context, node):
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
with debug.increase_indent_cm():
dec_values = context.eval_node(dec.children[1])
dec_values = context.infer_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and infer it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = eval_trailer(context, dec_values, trailer)
dec_values = infer_trailer(context, dec_values, trailer)
if not len(dec_values):
code = dec.get_code(include_prefix=False)
@@ -698,7 +698,7 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
return context_set
def eval_subscript_list(evaluator, context, index):
def infer_subscript_list(evaluator, context, index):
"""
Handles slices in subscript nodes.
"""
@@ -727,4 +727,4 @@ def eval_subscript_list(evaluator, context, index):
return ContextSet([iterable.SequenceLiteralContext(evaluator, context, index)])
# No slices
return context.eval_node(index)
return context.infer_node(index)

View File

@@ -85,7 +85,7 @@ def _paths_from_list_modifications(module_context, trailer1, trailer2):
if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
arg = arg.children[2]
for context in module_context.create_context(arg).eval_node(arg):
for context in module_context.create_context(arg).infer_node(arg):
if is_string(context):
abs_path = _abs_path(module_context, context.get_safe_value())
if abs_path is not None:

View File

@@ -627,7 +627,7 @@ class DataclassParamName(BaseTreeParamName):
if self.annotation_node is None:
return NO_CONTEXTS
else:
return self.parent_context.eval_node(self.annotation_node)
return self.parent_context.infer_node(self.annotation_node)
class ItemGetterCallable(ContextWrapper):

View File

@@ -232,7 +232,7 @@ class IntegrationTestCase(object):
user_context = user_context.get_function_execution()
element.parent = user_context.tree_node
results = convert_contexts(
evaluator.eval_element(user_context, element),
evaluator.infer_element(user_context, element),
)
if not results:
raise Exception('Could not resolve %s on line %s'

View File

@@ -2,7 +2,7 @@ import pytest
from jedi.inference.context import TreeInstance
def _eval_literal(Script, code, is_fstring=False):
def _infer_literal(Script, code, is_fstring=False):
def_, = Script(code).goto_definitions()
if is_fstring:
assert def_.name == 'str'
@@ -20,15 +20,15 @@ def test_f_strings(Script, environment):
if environment.version_info < (3, 6):
pytest.skip()
assert _eval_literal(Script, 'f"asdf"', is_fstring=True) == ''
assert _eval_literal(Script, 'f"{asdf} "', is_fstring=True) == ''
assert _eval_literal(Script, 'F"{asdf} "', is_fstring=True) == ''
assert _eval_literal(Script, 'rF"{asdf} "', is_fstring=True) == ''
assert _infer_literal(Script, 'f"asdf"', is_fstring=True) == ''
assert _infer_literal(Script, 'f"{asdf} "', is_fstring=True) == ''
assert _infer_literal(Script, 'F"{asdf} "', is_fstring=True) == ''
assert _infer_literal(Script, 'rF"{asdf} "', is_fstring=True) == ''
def test_rb_strings(Script, environment):
assert _eval_literal(Script, 'br"asdf"') == b'asdf'
obj = _eval_literal(Script, 'rb"asdf"')
assert _infer_literal(Script, 'br"asdf"') == b'asdf'
obj = _infer_literal(Script, 'rb"asdf"')
# rb is not valid in Python 2. Due to error recovery we just get a
# string.
@@ -39,8 +39,8 @@ def test_thousand_separators(Script, environment):
if environment.version_info < (3, 6):
pytest.skip()
assert _eval_literal(Script, '1_2_3') == 123
assert _eval_literal(Script, '123_456_789') == 123456789
assert _eval_literal(Script, '0x3_4') == 52
assert _eval_literal(Script, '0b1_0') == 2
assert _eval_literal(Script, '0o1_0') == 8
assert _infer_literal(Script, '1_2_3') == 123
assert _infer_literal(Script, '123_456_789') == 123456789
assert _infer_literal(Script, '0x3_4') == 52
assert _infer_literal(Script, '0b1_0') == 2
assert _infer_literal(Script, '0o1_0') == 8

View File

@@ -14,5 +14,5 @@ def test_equals(Script, environment, source):
pytest.skip("Ellipsis does not exists in 2")
script = Script(source)
node = script._module_node.children[0]
first, = script._get_module().eval_node(node)
first, = script._get_module().infer_node(node)
assert isinstance(first, CompiledObject) and first.get_safe_value() is True