Merge branch 'values'

Dave Halter
2017-09-28 16:19:38 +02:00
30 changed files with 1133 additions and 1047 deletions


@@ -33,6 +33,7 @@ from jedi.evaluate.helpers import get_module_names, evaluate_call_of_leaf
from jedi.evaluate.sys_path import get_venv_path, dotted_path_in_sys_path
from jedi.evaluate.iterable import unpack_tuple_to_dict
from jedi.evaluate.filters import TreeNameDefinition
from jedi.evaluate.syntax_tree import tree_name_to_contexts
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
@@ -319,10 +320,8 @@ class Script(object):
for node in get_executable_nodes(module_node):
context = self._get_module().create_context(node)
if node.type in ('funcdef', 'classdef'):
# TODO This is stupid, should be private
from jedi.evaluate.finder import _name_to_types
# Resolve the decorators.
_name_to_types(self._evaluator, context, node.children[1])
tree_name_to_contexts(self._evaluator, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():


@@ -10,7 +10,7 @@ from parso.python.tree import search_ancestor
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.evaluate.utils import ignored, unite
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
@@ -290,7 +290,7 @@ class BaseDefinition(object):
if not path:
return None # for keywords the path is empty
with common.ignored(KeyError):
with ignored(KeyError):
path[0] = self._mapping[path[0]]
for key, repl in self._tuple_mapping.items():
if tuple(path[:len(key)]) == key:
@@ -567,7 +567,7 @@ class Definition(BaseDefinition):
"""
defs = self._name.infer()
return sorted(
common.unite(defined_names(self._evaluator, d) for d in defs),
unite(defined_names(self._evaluator, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)


@@ -10,6 +10,7 @@ from parso.python import tree
from parso import split_lines
from jedi._compatibility import u
from jedi.evaluate.syntax_tree import eval_atom
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.cache import time_cache
@@ -206,7 +207,7 @@ def evaluate_goto_definition(evaluator, context, leaf):
elif parent.type == 'trailer':
return evaluate_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
return context.evaluator.eval_atom(context, leaf)
return eval_atom(context, leaf)
return []


@@ -2,7 +2,7 @@ import pydoc
import keyword
from jedi._compatibility import is_py3, is_py35
from jedi import common
from jedi.evaluate.utils import ignored
from jedi.evaluate.filters import AbstractNameDefinition
from parso.python.tree import Leaf
@@ -123,7 +123,7 @@ def imitate_pydoc(string):
# with unicode strings)
string = str(string)
h = pydoc.help
with common.ignored(KeyError):
with ignored(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')

jedi/common/__init__.py (new file)

@@ -0,0 +1 @@
from jedi.common.context import BaseContextSet

jedi/common/context.py (new file)

@@ -0,0 +1,54 @@
class BaseContextSet(object):
def __init__(self, *args):
self._set = set(args)
@classmethod
def from_iterable(cls, iterable):
return cls.from_set(set(iterable))
@classmethod
def from_set(cls, set_):
self = cls()
self._set = set_
return self
@classmethod
def from_sets(cls, sets):
"""
Used to work with an iterable of sets.
"""
aggregated = set()
sets = list(sets)
for set_ in sets:
if isinstance(set_, BaseContextSet):
aggregated |= set_._set
else:
aggregated |= set_
return cls.from_set(aggregated)
def __or__(self, other):
return type(self).from_set(self._set | other._set)
def __iter__(self):
for element in self._set:
yield element
def __bool__(self):
return bool(self._set)
def __len__(self):
return len(self._set)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(s) for s in self._set))
def filter(self, filter_func):
return type(self).from_iterable(filter(filter_func, self._set))
def __getattr__(self, name):
def mapper(*args, **kwargs):
return type(self).from_sets(
getattr(context, name)(*args, **kwargs)
for context in self._set
)
return mapper
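
Being a new file in this merge, ``BaseContextSet`` has no in-tree usage yet. A minimal sketch of how the class above behaves, with plain values and a hypothetical ``Box`` class standing in for contexts:

a = BaseContextSet(1, 2)
b = BaseContextSet.from_iterable([2, 3])
merged = a | b                      # union -> BaseContextSet(1, 2, 3)
assert len(merged) == 3
assert len(merged.filter(lambda x: x % 2 == 0)) == 1   # keeps only 2

class Box:                          # hypothetical element type
    def __init__(self, *values):
        self.values = set(values)
    def contents(self):
        return set(self.values)

# Any unknown attribute yields a mapper that calls the method on every
# element and unions the returned sets (the __getattr__ above).
boxes = BaseContextSet(Box(1, 2), Box(2, 3))
assert len(boxes.contents()) == 3   # {1, 2, 3}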


@@ -12,29 +12,31 @@ Evaluation of Python code in |jedi| is based on three assumptions:
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle called lazy evaluation. If you
don't know about it, google it. That said, the typical entry point for static
analysis is calling ``eval_statement``. There's separate logic for
autocompletion in the API, the evaluator is all about evaluating an expression.
The actual algorithm is based on a principle called lazy evaluation. That
said, the typical entry point for static analysis is calling
``eval_expr_stmt``. There's separate logic for autocompletion in the API, the
evaluator is all about evaluating an expression.
Now you need to understand what follows after ``eval_statement``. Let's
TODO this paragraph is not what jedi does anymore.
Now you need to understand what follows after ``eval_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``eval_statement`` will
about ``datetime.date``. At the end of the procedure ``eval_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``Evaluator.eval_statement`` doesn't do much, because there's no assignment.
- ``Evaluator.eval_element`` cares for resolving the dotted path
- ``Evaluator.eval_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.eval_node`` cares for resolving the dotted path
- ``Evaluator.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``eval_element`` to find ``date``
- Now ``find_types`` is called again by ``eval_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
@@ -46,7 +48,7 @@ What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``eval_statement`` recursion. It's really
Well... You get it. Just another ``eval_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
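
The description above can be condensed into a toy resolver. ``eval_dotted_path`` below is purely illustrative and not a Jedi API:

import datetime  # stands in for the module the import logic finds

def eval_dotted_path(global_names, dotted):
    # "find_types" step: look the first name up in the global scope.
    obj = global_names[dotted[0]]
    for name in dotted[1:]:  # one trailer lookup per dot
        obj = getattr(obj, name)
    return obj

# ``eval_expr_stmt`` on "datetime.date" eventually returns the date class.
assert eval_dotted_path({'datetime': datetime}, ['datetime', 'date']) is datetime.date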
@@ -67,48 +69,23 @@ from parso.python import tree
import parso
from jedi import debug
from jedi.common import unite
from jedi.evaluate.utils import unite
from jedi.evaluate import representation as er
from jedi.evaluate import imports
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate import stdlib
from jedi.evaluate import finder
from jedi.evaluate import compiled
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate import pep0484
from jedi.evaluate.filters import TreeNameDefinition, ParamName
from jedi.evaluate.instance import AnonymousInstance, BoundMethod
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
from jedi.evaluate.context import ContextualizedName, ContextualizedNode, \
ContextSet, NO_CONTEXTS, iterate_contexts
from jedi.evaluate.syntax_tree import eval_trailer, eval_expr_stmt, \
eval_node, check_tuple_assignments
from jedi import parser_utils
def _limit_context_infers(func):
"""
This is for now the way how we limit type inference going wild. There are
other ways to ensure recursion limits as well. This is mostly necessary
because of instance (self) access that can be quite tricky to limit.
I'm still not sure this is the way to go, but it looks okay for now and we
can still go another way in the future. Tests are there. ~ dave
"""
def wrapper(evaluator, context, *args, **kwargs):
n = context.tree_node
try:
evaluator.inferred_element_counts[n] += 1
if evaluator.inferred_element_counts[n] > 300:
debug.warning('In context %s there were too many inferences.', n)
return set()
except KeyError:
evaluator.inferred_element_counts[n] = 1
return func(evaluator, context, *args, **kwargs)
return wrapper
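
Stripped of the evaluator plumbing, the guard above is just a per-node call counter. A self-contained sketch of the same pattern (the limit of 300 matches the code above; the names are made up):

from collections import defaultdict

def limit_infers(max_infers=300):
    counts = defaultdict(int)
    def decorator(func):
        def wrapper(node, *args, **kwargs):
            counts[node] += 1
            if counts[node] > max_infers:
                return set()  # bail out instead of inferring forever
            return func(node, *args, **kwargs)
        return wrapper
    return decorator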
class Evaluator(object):
def __init__(self, grammar, sys_path=None):
self.grammar = grammar
@@ -141,82 +118,9 @@ class Evaluator(object):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def find_types(self, context, name_or_str, name_context, position=None,
search_global=False, is_goto=False, analysis_errors=True):
"""
This is the search function. The most important part to debug.
`remove_statements` and `filter_statements` really are the core part of
this completion.
:param position: Position of the last statement -> tuple of line, column
:return: List of Names. Their parents are the types.
"""
f = finder.NameFinder(self, context, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
@_limit_context_infers
def eval_statement(self, context, stmt, seek_name=None):
with recursion.execution_allowed(self, stmt) as allowed:
if allowed or context.get_root_context() == self.BUILTINS:
return self._eval_stmt(context, stmt, seek_name)
return set()
#@evaluator_function_cache(default=[])
@debug.increase_indent
def _eval_stmt(self, context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which contains the calls the statement makes. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_statement %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
types = self.eval_element(context, rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
types = finder.check_tuple_assignments(self, c_node, types)
first_operator = next(stmt.yield_operators(), None)
if first_operator not in ('=', None) and first_operator.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operator)
operator.value = operator.value[:-1]
name = stmt.get_defined_names()[0].value
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and types \
and parser_utils.for_stmt_defines_one_name(for_stmt):
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it if the variable is not a tuple.
node = for_stmt.get_testlist()
cn = ContextualizedNode(context, node)
ordered = list(iterable.py__iter__(self, cn.infer(), cn))
for lazy_context in ordered:
dct = {for_stmt.children[1].value: lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = self.eval_element(context, rhs)
left = precedence.calculate(self, context, left, operator, t)
types = left
else:
types = precedence.calculate(self, context, left, operator, types)
debug.dbg('eval_statement result %s', types)
return types
def eval_element(self, context, element):
if isinstance(context, iterable.CompForContext):
return self._eval_element_not_cached(context, element)
return eval_node(context, element)
if_stmt = element
while if_stmt is not None:
@@ -261,23 +165,23 @@ class Evaluator(object):
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = set([definition])
new_name_dicts[i][if_name.value] = ContextSet(definition)
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[if_name.value] = definitions
if len(name_dicts) > 1:
result = set()
result = ContextSet()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= self._eval_element_not_cached(context, element)
result |= eval_node(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return self._eval_element_not_cached(context, element)
return eval_node(context, element)
else:
return self._eval_element_if_evaluated(context, element)
@@ -290,201 +194,32 @@ class Evaluator(object):
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return self._eval_element_not_cached(context, element)
return eval_node(context, element)
return self._eval_element_cached(context, element)
@evaluator_function_cache(default=set())
@evaluator_function_cache(default=NO_CONTEXTS)
def _eval_element_cached(self, context, element):
return self._eval_element_not_cached(context, element)
@debug.increase_indent
@_limit_context_infers
def _eval_element_not_cached(self, context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
types = set()
typ = element.type
if typ in ('name', 'number', 'string', 'atom'):
types = self.eval_atom(context, element)
elif typ == 'keyword':
# For False/True/None
if element.value in ('False', 'True', 'None'):
types.add(compiled.builtin_from_name(self, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
elif typ == 'lambdef':
types = set([er.FunctionContext(self, context, element)])
elif typ == 'expr_stmt':
types = self.eval_statement(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
types = self.eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = self.eval_element(context, element.children[2])
types = set(precedence.calculate(self, context, types, trailer, right))
break
types = self.eval_trailer(context, types, trailer)
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
types = set([iterable.SequenceLiteralContext(self, context, element)])
elif typ in ('not_test', 'factor'):
types = self.eval_element(context, element.children[-1])
for operator in element.children[:-1]:
types = set(precedence.factor_calculate(self, types, operator))
elif typ == 'test':
# `x if foo else y` case.
types = (self.eval_element(context, element.children[0]) |
self.eval_element(context, element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not evaluated.
# In Python 2 the ellipsis is parsed as three single-dot tokens, not
# as one three-dot token.
assert element.value in ('.', '...')
types = set([compiled.create(self, Ellipsis)])
elif typ == 'dotted_name':
types = self.eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
types = unite(
typ.py__getattribute__(next_name, name_context=context)
for typ in types
)
types = types
elif typ == 'eval_input':
types = self._eval_element_not_cached(context, element.children[0])
elif typ == 'annassign':
types = pep0484._evaluate_for_annotation(context, element.children[1])
else:
types = precedence.calculate_children(self, context, element.children)
debug.dbg('eval_element result %s', types)
return types
def eval_atom(self, context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if atom.type == 'name':
# This is the first global lookup.
stmt = tree.search_ancestor(
atom, 'expr_stmt', 'lambdef'
) or atom
if stmt.type == 'lambdef':
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
string = parser_utils.safe_literal_eval(atom.value)
return set([compiled.create(self, string)])
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
types = self.eval_atom(context, c[0])
for string in c[1:]:
right = self.eval_atom(context, string)
types = precedence.calculate(self, context, types, '+', right)
return types
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return self.eval_element(context, c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type == 'comp_for':
return set([iterable.Comprehension.from_atom(self, context, atom)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
context = iterable.DictLiteralContext(self, context, atom)
else:
context = iterable.SequenceLiteralContext(self, context, atom)
return set([context])
def eval_trailer(self, context, types, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
new_types = set()
if trailer_op == '[':
new_types |= iterable.py__getitem__(self, context, types, trailer)
else:
for typ in types:
debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
if trailer_op == '.':
new_types |= typ.py__getattribute__(
name_context=context,
name_or_str=node
)
elif trailer_op == '(':
arguments = param.TreeArguments(self, context, node, trailer)
new_types |= self.execute(typ, arguments)
return new_types
@debug.increase_indent
def execute(self, obj, arguments):
if self.is_analysis:
arguments.eval_all()
debug.dbg('execute: %s %s', obj, arguments)
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self, obj, arguments)
except stdlib.NotInStdLib:
pass
try:
func = obj.py__call__
except AttributeError:
debug.warning("no execution possible %s", obj)
return set()
else:
types = func(arguments)
debug.dbg('execute result: %s in %s', types, obj)
return types
return eval_node(context, element)
def goto_definitions(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
if type_ == 'classdef':
return [er.ClassContext(self, name.parent, context)]
return [er.ClassContext(self, context, name.parent)]
elif type_ == 'funcdef':
return [er.FunctionContext(self, context, name.parent)]
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return self.eval_statement(context, def_, name)
return eval_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = self.eval_element(context, def_.children[3])
container_types = context.eval_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterable.py__iter__types(self, container_types, cn)
for_types = iterate_contexts(container_types, cn)
c_node = ContextualizedName(context, name)
return finder.check_tuple_assignments(self, c_node, for_types)
return check_tuple_assignments(self, c_node, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
@@ -509,25 +244,25 @@ class Evaluator(object):
return module_names
par = name.parent
typ = par.type
if typ == 'argument' and par.children[1] == '=' and par.children[0] == name:
node_type = par.type
if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
types = self.eval_element(context, trailer.children[1])
context_set = context.eval_node(trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_evaluate = trailer.parent.children[:i]
types = self.eval_element(context, to_evaluate[0])
context_set = context.eval_node(to_evaluate[0])
for trailer in to_evaluate[1:]:
types = self.eval_trailer(context, types, trailer)
context_set = eval_trailer(context, context_set, trailer)
param_names = []
for typ in types:
for context in context_set:
try:
get_param_names = typ.get_param_names
get_param_names = context.get_param_names
except AttributeError:
pass
else:
@@ -535,18 +270,18 @@ class Evaluator(object):
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif typ == 'dotted_name': # Is a decorator.
elif node_type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = helpers.deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = self.eval_element(context, new_dotted)
values = context.eval_node(new_dotted)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
for value in values
)
if typ == 'trailer' and par.children[0] == '.':
if node_type == 'trailer' and par.children[0] == '.':
values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
return unite(
value.py__getattribute__(name, name_context=context, is_goto=True)
@@ -604,7 +339,7 @@ class Evaluator(object):
return func.get_function_execution()
return func
elif scope_node.type == 'classdef':
class_context = er.ClassContext(self, scope_node, parent_context)
class_context = er.ClassContext(self, parent_context, scope_node)
if child_is_funcdef:
# anonymous instance
return AnonymousInstance(self, parent_context, class_context)


@@ -4,8 +4,6 @@
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
"""
import inspect
_NO_DEFAULT = object()
@@ -40,8 +38,6 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a
if default is not _NO_DEFAULT:
memo[key] = default
rv = function(obj, *args, **kwargs)
if inspect.isgenerator(rv):
rv = list(rv)
memo[key] = rv
return rv
return wrapper
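
The detail that matters here is ordering: the memo is seeded with ``default`` before ``function`` runs, so a recursive call that re-enters with the same key returns the default instead of looping. The bare pattern, with assumed names:

_NO_DEFAULT = object()

def memoize_default(default=_NO_DEFAULT):
    memo = {}
    def decorator(function):
        def wrapper(key):
            if key in memo:
                return memo[key]  # a recursive re-entry hits the default
            if default is not _NO_DEFAULT:
                memo[key] = default
            memo[key] = rv = function(key)
            return rv
        return wrapper
    return decorator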


@@ -13,7 +13,7 @@ from jedi import debug
from jedi.cache import underscore_memoization, memoize_method
from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \
ContextNameMixin
from jedi.evaluate.context import Context, LazyKnownContext
from jedi.evaluate.context import Context, LazyKnownContext, ContextSet
from jedi.evaluate.compiled.getattr_static import getattr_static
from . import fake
@@ -83,9 +83,9 @@ class CompiledObject(Context):
def py__call__(self, params):
if inspect.isclass(self.obj):
from jedi.evaluate.instance import CompiledInstance
return set([CompiledInstance(self.evaluator, self.parent_context, self, params)])
return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params))
else:
return set(self._execute_function(params))
return ContextSet.from_iterable(self._execute_function(params))
@CheckAttribute
def py__class__(self):
@@ -221,9 +221,9 @@ class CompiledObject(Context):
def py__getitem__(self, index):
if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
# Get rid of side effects, we won't call custom `__getitem__`s.
return set()
return ContextSet()
return set([create(self.evaluator, self.obj[index])])
return ContextSet(create(self.evaluator, self.obj[index]))
@CheckAttribute
def py__iter__(self):
@@ -266,7 +266,7 @@ class CompiledObject(Context):
# TODO do we?
continue
bltn_obj = create(self.evaluator, bltn_obj)
for result in self.evaluator.execute(bltn_obj, params):
for result in bltn_obj.execute(params):
yield result
for type_ in docstrings.infer_return_types(self):
yield type_
@@ -278,7 +278,9 @@ class CompiledObject(Context):
return [] # Builtins don't have imports
def dict_values(self):
return set(create(self.evaluator, v) for v in self.obj.values())
return ContextSet.from_iterable(
create(self.evaluator, v) for v in self.obj.values()
)
class CompiledName(AbstractNameDefinition):
@@ -301,7 +303,9 @@ class CompiledName(AbstractNameDefinition):
@underscore_memoization
def infer(self):
module = self.parent_context.get_root_context()
return [_create_from_name(self._evaluator, module, self.parent_context, self.string_name)]
return ContextSet(_create_from_name(
self._evaluator, module, self.parent_context, self.string_name
))
class SignatureParamName(AbstractNameDefinition):
@@ -318,13 +322,13 @@ class SignatureParamName(AbstractNameDefinition):
def infer(self):
p = self._signature_param
evaluator = self.parent_context.evaluator
types = set()
contexts = ContextSet()
if p.default is not p.empty:
types.add(create(evaluator, p.default))
contexts = ContextSet(create(evaluator, p.default))
if p.annotation is not p.empty:
annotation = create(evaluator, p.annotation)
types |= annotation.execute_evaluated()
return types
contexts |= annotation.execute_evaluated()
return contexts
class UnresolvableParamName(AbstractNameDefinition):
@@ -335,7 +339,7 @@ class UnresolvableParamName(AbstractNameDefinition):
self.string_name = name
def infer(self):
return set()
return ContextSet()
class CompiledContextName(ContextNameMixin, AbstractNameDefinition):
@@ -356,7 +360,7 @@ class EmptyCompiledName(AbstractNameDefinition):
self.string_name = name
def infer(self):
return []
return ContextSet()
class CompiledObjectFilter(AbstractFilter):


@@ -9,7 +9,7 @@ from jedi import settings
from jedi.evaluate import compiled
from jedi.cache import underscore_memoization
from jedi.evaluate import imports
from jedi.evaluate.context import Context
from jedi.evaluate.context import Context, ContextSet
from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate.compiled.getattr_static import getattr_static
@@ -41,9 +41,6 @@ class MixedObject(object):
# We have to overwrite everything that has to do with trailers, name
# lookups and filters to make it possible to route name lookups towards
# compiled objects and the rest towards tree node contexts.
def eval_trailer(*args, **kwags):
return Context.eval_trailer(*args, **kwags)
def py__getattribute__(*args, **kwargs):
return Context.py__getattribute__(*args, **kwargs)
@@ -85,7 +82,9 @@ class MixedName(compiled.CompiledName):
# PyQt4.QtGui.QStyleOptionComboBox.currentText
# -> just set it to None
obj = None
return [_create(self._evaluator, obj, parent_context=self.parent_context)]
return ContextSet(
_create(self._evaluator, obj, parent_context=self.parent_context)
)
@property
def api_type(self):


@@ -1,7 +1,9 @@
from jedi._compatibility import Python3Method
from jedi.common import unite
from parso.python.tree import ExprStmt, CompFor
from jedi import debug
from jedi._compatibility import Python3Method, zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
from jedi.common import BaseContextSet
class Context(object):
@@ -32,7 +34,36 @@ class Context(object):
return context
context = context.parent_context
@debug.increase_indent
def execute(self, arguments):
"""
In contrast to py__call__ this function is always available.
`hasattr(x, py__call__)` can also be checked to see if a context is
executable.
"""
if self.evaluator.is_analysis:
arguments.eval_all()
debug.dbg('execute: %s %s', self, arguments)
from jedi.evaluate import stdlib
try:
# Some stdlib functions like super(), namedtuple(), etc. have been
# hard-coded in Jedi to support them.
return stdlib.execute(self.evaluator, self, arguments)
except stdlib.NotInStdLib:
pass
try:
func = self.py__call__
except AttributeError:
debug.warning("no execution possible %s", self)
return NO_CONTEXTS
else:
context_set = func(arguments)
debug.dbg('execute result: %s in %s', context_set, self)
return context_set
return self.evaluator.execute(self, arguments)
def execute_evaluated(self, *value_list):
@@ -40,24 +71,87 @@ class Context(object):
Execute a function with already executed arguments.
"""
from jedi.evaluate.param import ValuesArguments
arguments = ValuesArguments([[value] for value in value_list])
arguments = ValuesArguments([ContextSet(value) for value in value_list])
return self.execute(arguments)
def iterate(self, contextualized_node=None):
debug.dbg('iterate')
try:
iter_method = self.py__iter__
except AttributeError:
if contextualized_node is not None:
from jedi.evaluate import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
else:
return iter_method()
def get_item(self, index_contexts, contextualized_node):
from jedi.evaluate.compiled import CompiledObject
from jedi.evaluate.iterable import Slice, AbstractSequence
result = ContextSet()
for index in index_contexts:
if isinstance(index, (CompiledObject, Slice)):
index = index.obj
if type(index) not in (float, int, str, unicode, slice, type(Ellipsis)):
# If the index is not clearly defined, we have to get all the
# possibilities.
if isinstance(self, AbstractSequence) and self.array_type == 'dict':
result |= self.dict_values()
else:
result |= iterate_contexts(ContextSet(self))
continue
# The actual getitem call.
try:
getitem = self.py__getitem__
except AttributeError:
from jedi.evaluate import analysis
# TODO this context is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
else:
try:
result |= getitem(index)
except IndexError:
result |= iterate_contexts(ContextSet(self))
except KeyError:
# Must be a dict. Lists don't raise KeyErrors.
result |= self.dict_values()
return result
def eval_node(self, node):
return self.evaluator.eval_element(self, node)
def eval_stmt(self, stmt, seek_name=None):
return self.evaluator.eval_statement(self, stmt, seek_name)
def eval_trailer(self, types, trailer):
return self.evaluator.eval_trailer(self, types, trailer)
@Python3Method
def py__getattribute__(self, name_or_str, name_context=None, position=None,
search_global=False, is_goto=False,
analysis_errors=True):
"""
This is the search function.
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
from jedi.evaluate import finder
f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
return self.evaluator.find_types(
self, name_or_str, name_context, position, search_global, is_goto,
analysis_errors)
@@ -88,6 +182,17 @@ class Context(object):
return None
def iterate_contexts(contexts, contextualized_node=None):
"""
Calls `iterate` on all contexts, ignores the ordering, and just returns
all contexts that the iterate functions yield.
"""
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in contexts.iterate(contextualized_node)
)
class TreeContext(Context):
def __init__(self, evaluator, parent_context=None):
super(TreeContext, self).__init__(evaluator, parent_context)
@@ -111,11 +216,11 @@ class AbstractLazyContext(object):
class LazyKnownContext(AbstractLazyContext):
"""data is a context."""
def infer(self):
return set([self.data])
return ContextSet(self.data)
class LazyKnownContexts(AbstractLazyContext):
"""data is a set of contexts."""
"""data is a ContextSet."""
def infer(self):
return self.data
@@ -125,7 +230,7 @@ class LazyUnknownContext(AbstractLazyContext):
super(LazyUnknownContext, self).__init__(None)
def infer(self):
return set()
return NO_CONTEXTS
class LazyTreeContext(AbstractLazyContext):
@@ -155,26 +260,26 @@ def get_merged_lazy_context(lazy_contexts):
class MergedLazyContexts(AbstractLazyContext):
"""data is a list of lazy contexts."""
def infer(self):
return unite(l.infer() for l in self.data)
return ContextSet.from_sets(l.infer() for l in self.data)
class ContextualizedNode(object):
def __init__(self, context, node):
self.context = context
self._node = node
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.eval_node(self._node)
return self.context.eval_node(self.node)
class ContextualizedName(ContextualizedNode):
# TODO merge with TreeNameDefinition?!
@property
def name(self):
return self._node
return self.node
def assignment_indexes(self):
"""
@@ -188,8 +293,8 @@ class ContextualizedName(ContextualizedNode):
would result in ``[(1, xyz_node), (0, yz_node)]``.
"""
indexes = []
node = self._node.parent
compare = self._node
node = self.node.parent
compare = self.node
while node is not None:
if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
for i, child in enumerate(node.children):
@@ -204,3 +309,25 @@ class ContextualizedName(ContextualizedNode):
compare = node
node = node.parent
return indexes
class ContextSet(BaseContextSet):
def py__class__(self):
return ContextSet.from_iterable(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None):
type_iters = [c.iterate(contextualized_node) for c in self._set]
for lazy_contexts in zip_longest(*type_iters):
yield get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
NO_CONTEXTS = ContextSet()
def iterator_to_context_set(func):
def wrapper(*args, **kwargs):
return ContextSet.from_iterable(func(*args, **kwargs))
return wrapper
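
A short illustration of the decorator just added, with strings standing in for contexts:

@iterator_to_context_set
def infer():
    yield 'context_a'
    yield 'context_b'

result = infer()
assert isinstance(result, ContextSet)
assert len(result) == 2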


@@ -21,11 +21,11 @@ from textwrap import dedent
from parso import parse
from jedi._compatibility import u
from jedi.common import unite
from jedi.evaluate import context
from jedi.evaluate.utils import indent_block
from jedi.evaluate.cache import evaluator_method_cache
from jedi.common import indent_block
from jedi.evaluate.iterable import SequenceLiteralContext, FakeSequence
from jedi.evaluate.context import iterator_to_context_set, ContextSet, \
NO_CONTEXTS, LazyKnownContexts
DOCSTRING_PARAM_PATTERNS = [
@@ -223,7 +223,10 @@ def _execute_types_in_stmt(module_context, stmt):
contain is executed. (Used as type information).
"""
definitions = module_context.eval_node(stmt)
return unite(_execute_array_values(module_context.evaluator, d) for d in definitions)
return ContextSet.from_sets(
_execute_array_values(module_context.evaluator, d)
for d in definitions
)
def _execute_array_values(evaluator, array):
@@ -234,8 +237,11 @@ def _execute_array_values(evaluator, array):
if isinstance(array, SequenceLiteralContext):
values = []
for lazy_context in array.py__iter__():
objects = unite(_execute_array_values(evaluator, typ) for typ in lazy_context.infer())
values.append(context.LazyKnownContexts(objects))
objects = ContextSet.from_sets(
_execute_array_values(evaluator, typ)
for typ in lazy_context.infer()
)
values.append(LazyKnownContexts(objects))
return set([FakeSequence(evaluator, array.array_type, values)])
else:
return array.execute_evaluated()
@@ -246,7 +252,7 @@ def infer_param(execution_context, param):
from jedi.evaluate.instance import AnonymousInstanceFunctionExecution
def eval_docstring(docstring):
return set(
return ContextSet.from_iterable(
p
for param_str in _search_param_in_docstr(docstring, param.name.value)
for p in _evaluate_for_statement_string(module_context, param_str)
@@ -254,7 +260,7 @@ def infer_param(execution_context, param):
module_context = execution_context.get_root_context()
func = param.get_parent_function()
if func.type == 'lambdef':
return set()
return NO_CONTEXTS
types = eval_docstring(execution_context.py__doc__())
if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \
@@ -266,6 +272,7 @@ def infer_param(execution_context, param):
@evaluator_method_cache()
@iterator_to_context_set
def infer_return_types(function_context):
def search_return_in_docstr(code):
for p in DOCSTRING_RETURN_PATTERNS:
@@ -279,4 +286,3 @@ def infer_return_types(function_context):
for type_str in search_return_in_docstr(function_context.py__doc__()):
for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str):
yield type_eval
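
The module around this hunk turns docstring annotations into strings that are then evaluated. A simplified, regex-only version of the ``:type name:`` lookup it performs (the real DOCSTRING_PARAM_PATTERNS list also covers other docstring styles):

import re

def search_param_in_docstr(docstr, param_name):
    # Sphinx style only: ":type amount: int" -> "int"
    pattern = re.compile(r':type\s+%s:\s*([^\n]+)' % re.escape(param_name))
    return [match.group(1).strip() for match in pattern.finditer(docstr)]

doc = "Frobnicate things.\n\n:type amount: int\n"
assert search_param_in_docstr(doc, 'amount') == ['int']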


@@ -14,7 +14,7 @@ It works as follows:
- |Jedi| sees a param
- search for function calls named ``foo``
- execute these calls and check the input. This works with a ``ParamListener``.
- execute these calls and check the input.
"""
from parso.python import tree
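
The steps above, reduced to a toy: find the call sites of a function by name and collect their argument nodes. This sketch uses the stdlib ``ast`` module, whereas Jedi walks its own parso tree:

import ast

def find_call_arguments(source, func_name):
    tree = ast.parse(source)
    return [node.args for node in ast.walk(tree)
            if isinstance(node, ast.Call)
            and isinstance(node.func, ast.Name)
            and node.func.id == func_name]

calls = find_call_arguments("def foo(a): pass\nfoo(1)\nfoo('x')", 'foo')
assert len(calls) == 2  # two call sites; their arguments hint at ``a``'s type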
@@ -24,24 +24,14 @@ from jedi.evaluate.cache import evaluator_function_cache
from jedi.evaluate import imports
from jedi.evaluate.param import TreeArguments, create_default_params
from jedi.evaluate.helpers import is_stdlib_path
from jedi.common import to_list, unite
from jedi.evaluate.utils import to_list
from jedi.evaluate.context import ContextSet
from jedi.parser_utils import get_parent_scope
MAX_PARAM_SEARCHES = 20
class ParamListener(object):
"""
This listener is used to get the params for a function.
"""
def __init__(self):
self.param_possibilities = []
def execute(self, params):
self.param_possibilities += params
class MergedExecutedParams(object):
"""
Simulates being a parameter while actually just being multiple params.
@@ -50,7 +40,7 @@ class MergedExecutedParams(object):
self._executed_params = executed_params
def infer(self):
return unite(p.infer() for p in self._executed_params)
return ContextSet.from_sets(p.infer() for p in self._executed_params)
@debug.increase_indent
@@ -103,7 +93,7 @@ def search_params(evaluator, execution_context, funcdef):
evaluator.dynamic_params_depth -= 1
@evaluator_function_cache(default=[])
@evaluator_function_cache(default=None)
@to_list
def _search_function_executions(evaluator, module_context, funcdef):
"""


@@ -6,8 +6,9 @@ from abc import abstractmethod
from parso.tree import search_ancestor
from jedi.evaluate import flow_analysis
from jedi.common import to_list, unite
from jedi.evaluate.context import ContextSet
from jedi.parser_utils import get_parent_scope
from jedi.evaluate.utils import to_list
class AbstractNameDefinition(object):
@@ -35,10 +36,10 @@ class AbstractNameDefinition(object):
return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos)
def execute(self, arguments):
return unite(context.execute(arguments) for context in self.infer())
return self.infer().execute(arguments)
def execute_evaluated(self, *args, **kwargs):
return unite(context.execute_evaluated(*args, **kwargs) for context in self.infer())
return self.infer().execute_evaluated(*args, **kwargs)
@property
def api_type(self):
@@ -64,7 +65,7 @@ class AbstractTreeName(AbstractNameDefinition):
class ContextNameMixin(object):
def infer(self):
return set([self._context])
return ContextSet(self._context)
def get_root_context(self):
if self.parent_context is None:
@@ -93,8 +94,8 @@ class TreeNameDefinition(AbstractTreeName):
def infer(self):
# Refactor this, should probably be here.
from jedi.evaluate.finder import _name_to_types
return _name_to_types(self.parent_context.evaluator, self.parent_context, self.tree_name)
from jedi.evaluate.syntax_tree import tree_name_to_contexts
return tree_name_to_contexts(self.parent_context.evaluator, self.parent_context, self.tree_name)
@property
def api_type(self):
@@ -128,7 +129,7 @@ class AnonymousInstanceParamName(ParamName):
if param_node.position_index == 0:
# This is a speed optimization, to return the self param (because
# it's known). This only affects anonymous instances.
return set([self.parent_context.instance])
return ContextSet(self.parent_context.instance)
else:
return self.get_param().infer()


@@ -18,20 +18,16 @@ check for -> a is a string). There's big potential in these checks.
from parso.python import tree
from parso.tree import search_ancestor
from jedi import debug
from jedi.common import unite
from jedi import settings
from jedi.evaluate import representation as er
from jedi.evaluate.instance import AbstractInstanceContext
from jedi.evaluate import compiled
from jedi.evaluate import pep0484
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate import param
from jedi.evaluate import helpers
from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
from jedi.evaluate.context import ContextualizedName, ContextualizedNode
from jedi.evaluate.context import ContextSet
from jedi.parser_utils import is_scope, get_parent_scope
@@ -62,7 +58,7 @@ class NameFinder(object):
check = flow_analysis.reachability_check(
self._context, self._context.tree_node, self._name)
if check is flow_analysis.UNREACHABLE:
return set()
return ContextSet()
return self._found_predefined_types
types = self._names_to_types(names, attribute_lookup)
@@ -158,22 +154,20 @@ class NameFinder(object):
return inst.execute_function_slots(names, name)
def _names_to_types(self, names, attribute_lookup):
types = set()
contexts = ContextSet.from_sets(name.infer() for name in names)
types = unite(name.infer() for name in names)
debug.dbg('finder._names_to_types: %s -> %s', names, types)
debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
if not names and isinstance(self._context, AbstractInstanceContext):
# handling __getattr__ / __getattribute__
return self._check_getattr(self._context)
# Add isinstance and other if/assert knowledge.
if not types and isinstance(self._name, tree.Name) and \
if not contexts and isinstance(self._name, tree.Name) and \
not isinstance(self._name_context, AbstractInstanceContext):
flow_scope = self._name
base_node = self._name_context.tree_node
if base_node.type == 'comp_for':
return types
return contexts
while True:
flow_scope = get_parent_scope(flow_scope, include_flows=True)
n = _check_flow_information(self._name_context, flow_scope,
@@ -182,132 +176,7 @@ class NameFinder(object):
return n
if flow_scope == base_node:
break
return types
def _name_to_types(evaluator, context, tree_name):
types = []
node = tree_name.get_definition(import_name_always=True)
if node is None:
node = tree_name.parent
if node.type == 'global_stmt':
context = evaluator.create_context(context, tree_name)
finder = NameFinder(evaluator, context, context, tree_name.value)
filters = finder.get_filters(search_global=True)
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filters = [next(filters)]
return finder.find(filters, attribute_lookup=False)
elif node.type not in ('import_from', 'import_name'):
raise ValueError("Should not happen.")
typ = node.type
if typ == 'for_stmt':
types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
if types:
return types
if typ == 'with_stmt':
types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
if types:
return types
if typ in ('for_stmt', 'comp_for'):
try:
types = context.predefined_names[node][tree_name.value]
except KeyError:
cn = ContextualizedNode(context, node.children[3])
for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
c_node = ContextualizedName(context, tree_name)
types = check_tuple_assignments(evaluator, c_node, for_types)
elif typ == 'expr_stmt':
types = _remove_statements(evaluator, context, node, tree_name)
elif typ == 'with_stmt':
context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
enter_methods = unite(
context_manager.py__getattribute__('__enter__')
for context_manager in context_managers
)
types = unite(method.execute_evaluated() for method in enter_methods)
elif typ in ('import_from', 'import_name'):
types = imports.infer_import(context, tree_name)
elif typ in ('funcdef', 'classdef'):
types = _apply_decorators(evaluator, context, node)
elif typ == 'try_stmt':
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
types = unite(
evaluator.execute(t, param.ValuesArguments([]))
for t in exceptions
)
else:
raise ValueError("Should not happen.")
return types
def _apply_decorators(evaluator, context, node):
"""
Returns the function that should be executed in the end.
This is also the place where the decorators are processed.
"""
if node.type == 'classdef':
decoratee_context = er.ClassContext(
evaluator,
parent_context=context,
classdef=node
)
else:
decoratee_context = er.FunctionContext(
evaluator,
parent_context=context,
funcdef=node
)
initial = values = set([decoratee_context])
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values)
dec_values = context.eval_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and evaluate it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = evaluator.eval_trailer(context, dec_values, trailer)
if not len(dec_values):
debug.warning('decorator not found: %s on %s', dec, node)
return initial
values = unite(dec_value.execute(param.ValuesArguments([values]))
for dec_value in dec_values)
if not len(values):
debug.warning('not possible to resolve wrappers found %s', node)
return initial
debug.dbg('decorator end %s', values)
return values
def _remove_statements(evaluator, context, stmt, name):
"""
This is the part where statements are being stripped.
Due to lazy evaluation, statements like a = func; b = a; b() have to be
evaluated.
"""
types = set()
check_instance = None
pep0484types = \
pep0484.find_type_from_comment_hint_assign(context, stmt, name)
if pep0484types:
return pep0484types
types |= context.eval_stmt(stmt, seek_name=name)
if check_instance is not None:
# class renames
types = set([er.get_instance_el(evaluator, check_instance, a, True)
if isinstance(a, er.Function) else a for a in types])
return types
return contexts
def _check_flow_information(context, flow, search_name, pos):
@@ -377,34 +246,13 @@ def _check_isinstance_type(context, element, search_name):
except AssertionError:
return None
result = set()
context_set = ContextSet()
for cls_or_tup in lazy_context_cls.infer():
if isinstance(cls_or_tup, iterable.AbstractSequence) and \
cls_or_tup.array_type == 'tuple':
for lazy_context in cls_or_tup.py__iter__():
for context in lazy_context.infer():
result |= context.execute_evaluated()
context_set |= context.execute_evaluated()
else:
result |= cls_or_tup.execute_evaluated()
return result
def check_tuple_assignments(evaluator, contextualized_name, types):
"""
Checks if tuples are assigned.
"""
lazy_context = None
for index, node in contextualized_name.assignment_indexes():
cn = ContextualizedNode(contextualized_name.context, node)
iterated = iterable.py__iter__(evaluator, types, cn)
for _ in range(index + 1):
try:
lazy_context = next(iterated)
except StopIteration:
# We could do this with the default param in next. But this
# would allow this loop to run for a very long time if the
# index number is high. Therefore break if the loop is
# finished.
return set()
types = lazy_context.infer()
return types
context_set |= cls_or_tup.execute_evaluated()
return context_set


@@ -6,7 +6,10 @@ from itertools import chain
from contextlib import contextmanager
from parso.python import tree
from jedi._compatibility import unicode
from jedi.parser_utils import get_parent_scope
from jedi.evaluate.compiled import CompiledObject
def is_stdlib_path(path):
@@ -87,8 +90,9 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
trailers = power.children[1:cut]
values = context.eval_node(base)
from jedi.evaluate.syntax_tree import eval_trailer
for trailer in trailers:
values = context.eval_trailer(values, trailer)
values = eval_trailer(context, values, trailer)
return values
@@ -172,3 +176,19 @@ def predefine_names(context, flow_scope, dct):
yield
finally:
del predefined[flow_scope]
def is_compiled(context):
return isinstance(context, CompiledObject)
def is_string(context):
return is_compiled(context) and isinstance(context.obj, (str, unicode))
def is_literal(context):
return is_number(context) or is_string(context)
def is_number(context):
return is_compiled(context) and isinstance(context.obj, (int, float))


@@ -24,18 +24,19 @@ from parso import python_bytes_to_unicode
from jedi._compatibility import find_module, unicode, ImplicitNSInfo
from jedi import debug
from jedi import settings
from jedi.common import unite
from jedi.evaluate import sys_path
from jedi.evaluate import helpers
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.utils import unite
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import AbstractNameDefinition
from jedi.evaluate.context import ContextSet, NO_CONTEXTS
# This memoization is needed, because otherwise we will infinitely loop on
# certain imports.
@evaluator_method_cache(default=set())
@evaluator_method_cache(default=NO_CONTEXTS)
def infer_import(context, tree_name, is_goto=False):
module_context = context.get_root_context()
import_node = search_ancestor(tree_name, 'import_name', 'import_from')
@@ -63,7 +64,7 @@ def infer_import(context, tree_name, is_goto=False):
# scopes = [NestedImportModule(module, import_node)]
if not types:
return set()
return NO_CONTEXTS
if from_import_name is not None:
types = unite(
@@ -72,8 +73,11 @@ def infer_import(context, tree_name, is_goto=False):
name_context=context,
is_goto=is_goto,
analysis_errors=False
) for t in types
)
for t in types
)
if not is_goto:
types = ContextSet.from_set(types)
if not types:
path = import_path + [from_import_name]
@@ -270,7 +274,7 @@ class Importer(object):
def follow(self):
if not self.import_path:
return set()
return NO_CONTEXTS
return self._do_import(self.import_path, self.sys_path_with_modifications())
def _do_import(self, import_path, sys_path):
@@ -296,7 +300,7 @@ class Importer(object):
module_name = '.'.join(import_parts)
try:
return set([self._evaluator.modules[module_name]])
return ContextSet(self._evaluator.modules[module_name])
except KeyError:
pass
@@ -305,7 +309,7 @@ class Importer(object):
# the module cache.
bases = self._do_import(import_path[:-1], sys_path)
if not bases:
return set()
return NO_CONTEXTS
# We can take the first element, because only the os special
# case yields multiple modules, which is not important for
# further imports.
@@ -323,7 +327,7 @@ class Importer(object):
except AttributeError:
# The module is not a package.
_add_error(self.module_context, import_path[-1])
return set()
return NO_CONTEXTS
else:
paths = method()
debug.dbg('search_module %s in paths %s', module_name, paths)
@@ -340,7 +344,7 @@ class Importer(object):
module_path = None
if module_path is None:
_add_error(self.module_context, import_path[-1])
return set()
return NO_CONTEXTS
else:
parent_module = None
try:
@@ -356,7 +360,7 @@ class Importer(object):
except ImportError:
# The module is not a package.
_add_error(self.module_context, import_path[-1])
return set()
return NO_CONTEXTS
code = None
if is_pkg:
@@ -383,10 +387,10 @@ class Importer(object):
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
return set()
return NO_CONTEXTS
self._evaluator.modules[module_name] = module
return set([module])
return ContextSet(module)
def _generate_name(self, name, in_module=None):
# Create a pseudo import to be able to follow them.


@@ -1,11 +1,11 @@
from abc import abstractproperty
from jedi._compatibility import is_py3
from jedi.common import unite
from jedi import debug
from jedi.evaluate import compiled
from jedi.evaluate import filters
from jedi.evaluate.context import Context, LazyKnownContext, LazyKnownContexts
from jedi.evaluate.context import Context, LazyKnownContext, LazyKnownContexts, \
ContextSet, iterator_to_context_set, NO_CONTEXTS
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.param import AbstractArguments, AnonymousArguments
from jedi.cache import memoize_method
@@ -58,7 +58,7 @@ class AbstractInstanceContext(Context):
raise AttributeError
def execute(arguments):
return unite(name.execute(arguments) for name in names)
return ContextSet.from_sets(name.execute(arguments) for name in names)
return execute
@@ -80,7 +80,7 @@ class AbstractInstanceContext(Context):
return []
def execute_function_slots(self, names, *evaluated_args):
return unite(
return ContextSet.from_sets(
name.execute_evaluated(*evaluated_args)
for name in names
)
@@ -96,7 +96,7 @@ class AbstractInstanceContext(Context):
none_obj = compiled.create(self.evaluator, None)
return self.execute_function_slots(names, none_obj, obj)
else:
return set([self])
return ContextSet(self)
def get_filters(self, search_global=None, until_position=None,
origin_scope=None, include_self_names=True):
@@ -122,7 +122,7 @@ class AbstractInstanceContext(Context):
names = self.get_function_slot_names('__getitem__')
except KeyError:
debug.warning('No __getitem__, cannot access the array.')
return set()
return NO_CONTEXTS
else:
index_obj = compiled.create(self.evaluator, index)
return self.execute_function_slots(names, index_obj)
@@ -250,6 +250,7 @@ class CompiledInstanceName(compiled.CompiledName):
super(CompiledInstanceName, self).__init__(evaluator, parent_context, name)
self._instance = instance
@iterator_to_context_set
def infer(self):
for result_context in super(CompiledInstanceName, self).infer():
if isinstance(result_context, er.FunctionContext):
@@ -311,9 +312,7 @@ class CompiledBoundMethod(compiled.CompiledObject):
class InstanceNameDefinition(filters.TreeNameDefinition):
def infer(self):
contexts = super(InstanceNameDefinition, self).infer()
for context in contexts:
yield context
return super(InstanceNameDefinition, self).infer()
class LazyInstanceName(filters.TreeNameDefinition):
@@ -331,6 +330,7 @@ class LazyInstanceName(filters.TreeNameDefinition):
class LazyInstanceClassName(LazyInstanceName):
@iterator_to_context_set
def infer(self):
for result_context in super(LazyInstanceClassName, self).infer():
if isinstance(result_context, er.FunctionContext):


@@ -22,19 +22,19 @@ It is important to note that:
"""
from jedi import debug
from jedi import settings
from jedi import common
from jedi.common import unite, safe_property
from jedi._compatibility import unicode, zip_longest, is_py3
from jedi.evaluate.utils import safe_property
from jedi._compatibility import is_py3
from jedi.evaluate.utils import to_list
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi.evaluate import context
from jedi.evaluate import precedence
from jedi.evaluate.helpers import is_string
from jedi.evaluate import recursion
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, \
ParserTreeFilter
from jedi.evaluate import context
from jedi.evaluate.context import ContextSet, NO_CONTEXTS, Context
from jedi.parser_utils import get_comp_fors
@@ -53,15 +53,20 @@ class AbstractSequence(context.Context):
return compiled.CompiledContextName(self, self.array_type)
class BuiltinMethod(object):
class BuiltinMethod(Context):
"""``Generator.__next__`` ``dict.values`` methods and so on."""
api_type = 'function'
def __init__(self, builtin_context, method, builtin_func):
self._builtin_context = builtin_context
super(BuiltinMethod, self).__init__(
builtin_context.evaluator,
parent_context=builtin_context
)
self._method = method
self._builtin_func = builtin_func
def py__call__(self, params):
return self._method(self._builtin_context)
return self._method(self.parent_context)
def __getattr__(self, name):
return getattr(self._builtin_func, name)
@@ -87,7 +92,7 @@ class SpecialMethodFilter(DictFilter):
# always only going to be one name. The same is true for the
# inferred values.
builtin_func = next(iter(filter.get(self.string_name)[0].infer()))
return set([BuiltinMethod(self.parent_context, self._callable, builtin_func)])
return ContextSet(BuiltinMethod(self.parent_context, self._callable, builtin_func))
def __init__(self, context, dct, builtin_context):
super(SpecialMethodFilter, self).__init__(dct)
@@ -104,7 +109,7 @@ class SpecialMethodFilter(DictFilter):
return self.SpecialMethodName(self.context, name, value, self._builtin_context)
def has_builtin_methods(cls):
def _has_builtin_methods(cls):
base_dct = {}
# Need to care properly about inheritance. Builtin Methods should not get
# lost just because they are not mentioned in a class.
@@ -123,7 +128,7 @@ def has_builtin_methods(cls):
return cls
def register_builtin_method(method_name, python_version_match=None):
def _register_builtin_method(method_name, python_version_match=None):
def wrapper(func):
if python_version_match and python_version_match != 2 + int(is_py3):
# Some functions do only apply to certain versions.
@@ -134,16 +139,16 @@ def register_builtin_method(method_name, python_version_match=None):
return wrapper
@has_builtin_methods
@_has_builtin_methods
class GeneratorMixin(object):
array_type = None
@register_builtin_method('send')
@register_builtin_method('next', python_version_match=2)
@register_builtin_method('__next__', python_version_match=3)
@_register_builtin_method('send')
@_register_builtin_method('next', python_version_match=2)
@_register_builtin_method('__next__', python_version_match=3)
def py__next__(self):
# TODO add TypeError if params are given.
return unite(lazy_context.infer() for lazy_context in self.py__iter__())
return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
def get_filters(self, search_global, until_position=None, origin_scope=None):
gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
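`ContextSet.from_sets` replaces the old `unite` helper throughout this file. A minimal sketch of the union semantics it presumably provides (hypothetical stand-in class, not jedi's actual implementation):

    class SketchContextSet:
        def __init__(self, *args):
            self._set = set(args)

        @classmethod
        def from_sets(cls, sets):
            # Union an iterable of sets into one flat set, like `unite`.
            self = cls()
            for s in sets:
                self._set |= set(s)
            return self

    merged = SketchContextSet.from_sets([{1, 2}, {2, 3}])
    assert merged._set == {1, 2, 3}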
@@ -234,14 +239,13 @@ class Comprehension(AbstractSequence):
return CompForContext.from_comp_for(parent_context, comp_for)
def _nested(self, comp_fors, parent_context=None):
evaluator = self.evaluator
comp_for = comp_fors[0]
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.eval_node(input_node)
cn = context.ContextualizedNode(parent_context, input_node)
iterated = py__iter__(evaluator, input_types, cn)
iterated = input_types.iterate(cn)
exprlist = comp_for.children[1]
for i, lazy_context in enumerate(iterated):
types = lazy_context.infer()
@@ -262,7 +266,7 @@ class Comprehension(AbstractSequence):
yield iterated
@evaluator_method_cache(default=[])
@common.to_list
@to_list
def _iterate(self):
comp_fors = tuple(get_comp_fors(self._get_comp_for()))
for result in self._nested(comp_fors):
@@ -296,7 +300,10 @@ class ArrayMixin(object):
return self.evaluator.BUILTINS
def dict_values(self):
return unite(self._defining_context.eval_node(v) for k, v in self._items())
return ContextSet.from_sets(
self._defining_context.eval_node(v)
for k, v in self._items()
)
class ListComprehension(ArrayMixin, Comprehension):
@@ -304,7 +311,7 @@ class ListComprehension(ArrayMixin, Comprehension):
def py__getitem__(self, index):
if isinstance(index, slice):
return set([self])
return ContextSet(self)
all_types = list(self.py__iter__())
return all_types[index].infer()
@@ -314,7 +321,7 @@ class SetComprehension(ArrayMixin, Comprehension):
array_type = 'set'
@has_builtin_methods
@_has_builtin_methods
class DictComprehension(ArrayMixin, Comprehension):
array_type = 'dict'
@@ -334,16 +341,16 @@ class DictComprehension(ArrayMixin, Comprehension):
return self.dict_values()
def dict_values(self):
return unite(values for keys, values in self._iterate())
return ContextSet.from_sets(values for keys, values in self._iterate())
@register_builtin_method('values')
@_register_builtin_method('values')
def _imitate_values(self):
lazy_context = context.LazyKnownContexts(self.dict_values())
return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context]))
@register_builtin_method('items')
@_register_builtin_method('items')
def _imitate_items(self):
items = set(
items = ContextSet.from_iterable(
FakeSequence(
self.evaluator, 'tuple',
(context.LazyKnownContexts(keys), context.LazyKnownContexts(values))
@@ -385,7 +392,7 @@ class SequenceLiteralContext(ArrayMixin, AbstractSequence):
# Can raise an IndexError
if isinstance(index, slice):
return set([self])
return ContextSet(self)
else:
return self._defining_context.eval_node(self._items()[index])
@@ -396,7 +403,7 @@ class SequenceLiteralContext(ArrayMixin, AbstractSequence):
"""
if self.array_type == 'dict':
# Get keys.
types = set()
types = ContextSet()
for k, _ in self._items():
types |= self._defining_context.eval_node(k)
# We don't know which dict index comes first, therefore always
@@ -413,7 +420,7 @@ class SequenceLiteralContext(ArrayMixin, AbstractSequence):
def _values(self):
"""Returns a list of a list of node."""
if self.array_type == 'dict':
return unite(v for k, v in self._items())
return ContextSet.from_sets(v for k, v in self._items())
else:
return self._items()
@@ -451,14 +458,14 @@ class SequenceLiteralContext(ArrayMixin, AbstractSequence):
"""
for key_node, value in self._items():
for key in self._defining_context.eval_node(key_node):
if precedence.is_string(key):
if is_string(key):
yield key.obj, context.LazyTreeContext(self._defining_context, value)
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.atom)
@has_builtin_methods
@_has_builtin_methods
class DictLiteralContext(SequenceLiteralContext):
array_type = 'dict'
@@ -467,12 +474,12 @@ class DictLiteralContext(SequenceLiteralContext):
self._defining_context = defining_context
self.atom = atom
@register_builtin_method('values')
@_register_builtin_method('values')
def _imitate_values(self):
lazy_context = context.LazyKnownContexts(self.dict_values())
return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context]))
@register_builtin_method('items')
@_register_builtin_method('items')
def _imitate_items(self):
lazy_contexts = [
context.LazyKnownContext(FakeSequence(
@@ -482,7 +489,7 @@ class DictLiteralContext(SequenceLiteralContext):
)) for key_node, value_node in self._items()
]
return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts))
class _FakeArray(SequenceLiteralContext):
@@ -502,7 +509,7 @@ class FakeSequence(_FakeArray):
self._lazy_context_list = lazy_context_list
def py__getitem__(self, index):
return set(self._lazy_context_list[index].infer())
return self._lazy_context_list[index].infer()
def py__iter__(self):
return self._lazy_context_list
@@ -527,7 +534,7 @@ class FakeDict(_FakeArray):
return self._dct[index].infer()
def dict_values(self):
return unite(lazy_context.infer() for lazy_context in self._dct.values())
return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values())
def exact_key_items(self):
return self._dct.items()
@@ -544,7 +551,7 @@ class MergedArray(_FakeArray):
yield lazy_context
def py__getitem__(self, index):
return unite(lazy_context.infer() for lazy_context in self.py__iter__())
return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
def _items(self):
for array in self._arrays:
@@ -568,7 +575,7 @@ def unpack_tuple_to_dict(context, types, exprlist):
dct = {}
parts = iter(exprlist.children[::2])
n = 0
for lazy_context in py__iter__(context.evaluator, types, exprlist):
for lazy_context in types.iterate(exprlist):
n += 1
try:
part = next(parts)
@@ -595,103 +602,16 @@ def unpack_tuple_to_dict(context, types, exprlist):
raise NotImplementedError
def py__iter__(evaluator, types, contextualized_node=None):
debug.dbg('py__iter__')
type_iters = []
for typ in types:
try:
iter_method = typ.py__iter__
except AttributeError:
if contextualized_node is not None:
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node._node,
message="TypeError: '%s' object is not iterable" % typ)
else:
type_iters.append(iter_method())
for lazy_contexts in zip_longest(*type_iters):
yield context.get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
def py__iter__types(evaluator, types, contextualized_node=None):
"""
Calls `py__iter__`, but ignores the ordering in the end and just returns
all types that it contains.
"""
return unite(
lazy_context.infer()
for lazy_context in py__iter__(evaluator, types, contextualized_node)
)
def py__getitem__(evaluator, context, types, trailer):
from jedi.evaluate.representation import ClassContext
from jedi.evaluate.instance import TreeInstance
result = set()
trailer_op, node, trailer_cl = trailer.children
assert trailer_op == "["
assert trailer_cl == "]"
# special case: PEP0484 typing module, see
# https://github.com/davidhalter/jedi/issues/663
for typ in list(types):
if isinstance(typ, (ClassContext, TreeInstance)):
typing_module_types = pep0484.py__getitem__(context, typ, node)
if typing_module_types is not None:
types.remove(typ)
result |= typing_module_types
if not types:
# all consumed by special cases
return result
for index in create_index_types(evaluator, context, node):
if isinstance(index, (compiled.CompiledObject, Slice)):
index = index.obj
if type(index) not in (float, int, str, unicode, slice, type(Ellipsis)):
# If the index is not clearly defined, we have to get all the
# possiblities.
for typ in list(types):
if isinstance(typ, AbstractSequence) and typ.array_type == 'dict':
types.remove(typ)
result |= typ.dict_values()
return result | py__iter__types(evaluator, types)
for typ in types:
# The actual getitem call.
try:
getitem = typ.py__getitem__
except AttributeError:
# TODO this context is probably not right.
analysis.add(context, 'type-error-not-subscriptable', trailer_op,
message="TypeError: '%s' object is not subscriptable" % typ)
else:
try:
result |= getitem(index)
except IndexError:
result |= py__iter__types(evaluator, set([typ]))
except KeyError:
# Must be a dict. Lists don't raise KeyErrors.
result |= typ.dict_values()
return result
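The removed module-level `py__iter__`/`py__iter__types`/`py__getitem__` helpers survive as methods on context sets (`types.iterate(cn)` above). A rough sketch of the equivalence, assuming `ContextSet.iterate` keeps the `zip_longest` merging of the deleted helper:

    from itertools import zip_longest

    def iterate(contexts, contextualized_node=None):
        # One iterator per context that supports py__iter__; the real code
        # reports 'type-error-not-iterable' for the rest.
        type_iters = []
        for typ in contexts:
            iter_method = getattr(typ, 'py__iter__', None)
            if iter_method is not None:
                type_iters.append(iter_method())
        for lazy_contexts in zip_longest(*type_iters):
            # jedi merges these into a single lazy context
            # (context.get_merged_lazy_context); a plain list stands in here.
            yield [l for l in lazy_contexts if l is not None]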
def check_array_additions(context, sequence):
""" Just a mapper function for the internal _check_array_additions """
if sequence.array_type not in ('list', 'set'):
# TODO also check for dict updates
return set()
return NO_CONTEXTS
return _check_array_additions(context, sequence)
@evaluator_method_cache(default=set())
@evaluator_method_cache(default=NO_CONTEXTS)
@debug.increase_indent
def _check_array_additions(context, sequence):
"""
@@ -706,7 +626,7 @@ def _check_array_additions(context, sequence):
module_context = context.get_root_context()
if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
debug.dbg('Dynamic array search aborted.', color='MAGENTA')
return set()
return ContextSet()
def find_additions(context, arglist, add_name):
params = list(param.TreeArguments(context.evaluator, context, arglist).unpack())
@@ -714,11 +634,11 @@ def _check_array_additions(context, sequence):
if add_name in ['insert']:
params = params[1:]
if add_name in ['append', 'add', 'insert']:
for key, lazy_context in params:
result.add(lazy_context)
for key, whatever in params:
result.add(whatever)
elif add_name in ['extend', 'update']:
for key, lazy_context in params:
result |= set(py__iter__(context.evaluator, lazy_context.infer()))
result |= set(lazy_context.infer().iterate())
return result
temp_param_add, settings.dynamic_params_for_other_modules = \
@@ -781,7 +701,7 @@ def get_dynamic_array_instance(instance):
ai = _ArrayInstance(instance)
from jedi.evaluate import param
return param.ValuesArguments([[ai]])
return param.ValuesArguments([ContextSet(ai)])
class _ArrayInstance(object):
@@ -806,7 +726,7 @@ class _ArrayInstance(object):
except StopIteration:
pass
else:
for lazy in py__iter__(self.instance.evaluator, lazy_context.infer()):
for lazy in lazy_context.infer().iterate():
yield lazy
from jedi.evaluate import param
@@ -815,6 +735,9 @@ class _ArrayInstance(object):
for addition in additions:
yield addition
def iterate(self, contextualized_node=None):
return self.py__iter__()
class Slice(context.Context):
def __init__(self, context, start, stop, step):
@@ -852,33 +775,3 @@ class Slice(context.Context):
return slice(get(self._start), get(self._stop), get(self._step))
except IndexError:
return slice(None, None, None)
def create_index_types(evaluator, context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return set([Slice(context, None, None, None)])
elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation, except for Python 2's
# Ellipsis.
# e.g. array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return set([Slice(context, *result)])
# No slices
return context.eval_node(index)


@@ -2,7 +2,7 @@ from collections import defaultdict
from jedi._compatibility import zip_longest
from jedi import debug
from jedi import common
from jedi.evaluate.utils import PushBackIterator
from parso.python import tree
from jedi.evaluate import iterable
from jedi.evaluate import analysis
@@ -10,6 +10,7 @@ from jedi.evaluate import context
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate.filters import ParamName
from jedi.evaluate.context import NO_CONTEXTS
def add_argument_issue(parent_context, error_name, lazy_context, message):
@@ -51,7 +52,7 @@ class AbstractArguments():
debug.warning('TypeError: %s expected at least %s arguments, got %s',
name, len(parameters), i)
raise ValueError
values = set() if argument is None else argument.infer()
values = NO_CONTEXTS if argument is None else argument.infer()
if not values and not optional:
# For the stdlib we always want values. If we don't get them,
@@ -237,7 +238,7 @@ class ExecutedParam(object):
pep0484_hints = pep0484.infer_param(self._execution_context, self._param_node)
doc_params = docstrings.infer_param(self._execution_context, self._param_node)
if pep0484_hints or doc_params:
return list(set(pep0484_hints) | set(doc_params))
return pep0484_hints | doc_params
return self._lazy_context.infer()
@@ -258,7 +259,7 @@ def get_params(execution_context, var_args):
for param in funcdef.get_params():
param_dict[param.name.value] = param
unpacked_va = list(var_args.unpack(funcdef))
var_arg_iterator = common.PushBackIterator(iter(unpacked_va))
var_arg_iterator = PushBackIterator(iter(unpacked_va))
non_matching_keys = defaultdict(lambda: [])
keys_used = {}
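`PushBackIterator` now comes from `jedi.evaluate.utils` instead of `jedi.common`. It lets the argument matcher un-consume a value when a positional argument turns out to belong elsewhere. A minimal sketch of such an iterator, assuming the usual push-back contract:

    class PushBackIterator(object):
        def __init__(self, iterator):
            self.pushes = []
            self.iterator = iterator

        def push_back(self, value):
            self.pushes.append(value)

        def __iter__(self):
            return self

        def __next__(self):
            # Re-deliver pushed-back values before advancing the iterator.
            if self.pushes:
                return self.pushes.pop()
            return next(self.iterator)

        next = __next__  # Python 2 spelling, assumed for compatibility

    it = PushBackIterator(iter([1, 2, 3]))
    assert next(it) == 1
    it.push_back(1)
    assert next(it) == 1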


@@ -19,17 +19,15 @@ x support for type hint comments for functions, `# type: (int, str) -> int`.
See comment from Guido https://github.com/davidhalter/jedi/issues/662
"""
import itertools
import os
import re
from parso import ParserSyntaxError
from parso.python import tree
from jedi.common import unite
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate import compiled
from jedi.evaluate.context import LazyTreeContext
from jedi.evaluate.context import LazyTreeContext, NO_CONTEXTS, ContextSet
from jedi import debug
from jedi import _compatibility
from jedi import parser_utils
@@ -42,16 +40,15 @@ def _evaluate_for_annotation(context, annotation, index=None):
and we're interested in that index
"""
if annotation is not None:
definitions = context.eval_node(
_fix_forward_reference(context, annotation))
context_set = context.eval_node(_fix_forward_reference(context, annotation))
if index is not None:
definitions = list(itertools.chain.from_iterable(
definition.py__getitem__(index) for definition in definitions
if definition.array_type == 'tuple' and
len(list(definition.py__iter__())) >= index))
return unite(d.execute_evaluated() for d in definitions)
context_set = context_set.filter(
lambda context: context.array_type == 'tuple' \
and len(list(context.py__iter__())) >= index
).py__getitem__(index)
return context_set.execute_evaluated()
else:
return set()
return NO_CONTEXTS
def _fix_forward_reference(context, node):
@@ -147,7 +144,7 @@ def py__getitem__(context, typ, node):
if type_name in ("Union", '_Union'):
# In Python 3.6 it's still called typing.Union but it's an instance
# called _Union.
return unite(context.eval_node(node) for node in nodes)
return ContextSet.from_sets(context.eval_node(node) for node in nodes)
if type_name in ("Optional", '_Optional'):
# Here we have the same issue like in Union. Therefore we also need to
# check for the instance typing._Optional (Python 3.6).


@@ -1,178 +0,0 @@
"""
Handles operator precedence.
"""
import operator as op
from jedi._compatibility import unicode
from jedi import debug
from jedi.evaluate.compiled import CompiledObject, create, builtin_from_name
from jedi.evaluate import analysis
# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
'==': op.eq,
'!=': op.ne,
'is': op.is_,
'is not': op.is_not,
'<': op.lt,
'<=': op.le,
'>': op.gt,
'>=': op.ge,
}
def literals_to_types(evaluator, result):
# Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
# int(), float(), etc).
new_result = set()
for typ in result:
if is_literal(typ):
# Literals are only valid as long as the operations are
# correct. Otherwise add a value-free instance.
cls = builtin_from_name(evaluator, typ.name.string_name)
new_result |= cls.execute_evaluated()
else:
new_result.add(typ)
return new_result
def calculate_children(evaluator, context, children):
"""
Calculate a list of children with operators.
"""
iterator = iter(children)
types = context.eval_node(next(iterator))
for operator in iterator:
right = next(iterator)
if operator.type == 'comp_op': # not in / is not
operator = ' '.join(c.value for c in operator.children)
# handle lazy evaluation of and/or here.
if operator in ('and', 'or'):
left_bools = set([left.py__bool__() for left in types])
if left_bools == set([True]):
if operator == 'and':
types = context.eval_node(right)
elif left_bools == set([False]):
if operator != 'and':
types = context.eval_node(right)
# Otherwise continue, because of uncertainty.
else:
types = calculate(evaluator, context, types, operator,
context.eval_node(right))
debug.dbg('calculate_children types %s', types)
return types
def calculate(evaluator, context, left_result, operator, right_result):
result = set()
if not left_result or not right_result:
# illegal slices e.g. cause left/right_result to be None
result = (left_result or set()) | (right_result or set())
result = literals_to_types(evaluator, result)
else:
# I don't think there's a reasonable chance that a string
# operation is still correct, once we pass something like six
# objects.
if len(left_result) * len(right_result) > 6:
result = literals_to_types(evaluator, left_result | right_result)
else:
for left in left_result:
for right in right_result:
result |= _element_calculate(evaluator, context, left, operator, right)
return result
def factor_calculate(evaluator, types, operator):
"""
Calculates `+`, `-`, `~` and `not` prefixes.
"""
for typ in types:
if operator == '-':
if _is_number(typ):
yield create(evaluator, -typ.obj)
elif operator == 'not':
value = typ.py__bool__()
if value is None: # Uncertainty.
return
yield create(evaluator, not value)
else:
yield typ
def _is_number(obj):
return isinstance(obj, CompiledObject) \
and isinstance(obj.obj, (int, float))
def is_string(obj):
return isinstance(obj, CompiledObject) \
and isinstance(obj.obj, (str, unicode))
def is_literal(obj):
return _is_number(obj) or is_string(obj)
def _is_tuple(obj):
from jedi.evaluate import iterable
return isinstance(obj, iterable.AbstractSequence) and obj.array_type == 'tuple'
def _is_list(obj):
from jedi.evaluate import iterable
return isinstance(obj, iterable.AbstractSequence) and obj.array_type == 'list'
def _element_calculate(evaluator, context, left, operator, right):
from jedi.evaluate import iterable, instance
l_is_num = _is_number(left)
r_is_num = _is_number(right)
if operator == '*':
# for iterables, ignore * operations
if isinstance(left, iterable.AbstractSequence) or is_string(left):
return set([left])
elif isinstance(right, iterable.AbstractSequence) or is_string(right):
return set([right])
elif operator == '+':
if l_is_num and r_is_num or is_string(left) and is_string(right):
return set([create(evaluator, left.obj + right.obj)])
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return set([iterable.MergedArray(evaluator, (left, right))])
elif operator == '-':
if l_is_num and r_is_num:
return set([create(evaluator, left.obj - right.obj)])
elif operator == '%':
# With strings and numbers the left type typically remains. Except for
# `int() % float()`.
return set([left])
elif operator in COMPARISON_OPERATORS:
operation = COMPARISON_OPERATORS[operator]
if isinstance(left, CompiledObject) and isinstance(right, CompiledObject):
# Possible, because the return is not an option. Just compare.
left = left.obj
right = right.obj
try:
result = operation(left, right)
except TypeError:
# Could be True or False.
return set([create(evaluator, True), create(evaluator, False)])
else:
return set([create(evaluator, result)])
elif operator == 'in':
return set()
def check(obj):
"""Checks if a Jedi object is either a float or an int."""
return isinstance(obj, instance.CompiledInstance) and \
obj.name.string_name in ('int', 'float')
# Static analysis, one is a number, the other one is not.
if operator in ('+', '-') and l_is_num != r_is_num \
and not (check(left) or check(right)):
message = "TypeError: unsupported operand type(s) for +: %s and %s"
analysis.add(context, 'type-error-operation', operator,
message % (left, right))
return set([left, right])


@@ -29,6 +29,7 @@ therefore the quality might not always be maximal.
from contextlib import contextmanager
from jedi import debug
from jedi.evaluate.context import NO_CONTEXTS
recursion_limit = 15
@@ -71,7 +72,7 @@ def execution_allowed(evaluator, node):
pushed_nodes.pop()
def execution_recursion_decorator(default=set()):
def execution_recursion_decorator(default=NO_CONTEXTS):
def decorator(func):
def wrapper(execution, **kwargs):
detector = execution.evaluator.execution_recursion_detector
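The recursion guard's `default` becomes `NO_CONTEXTS` instead of a mutable `set()`. The guard itself is a bail-out decorator; a generic sketch with a hypothetical detector API (`push_execution` returning True when the call would recurse too deeply):

    def execution_recursion_decorator(default=frozenset()):
        def decorator(func):
            def wrapper(execution, **kwargs):
                detector = execution.evaluator.execution_recursion_detector
                # Hypothetical API: True means the recursion limit is hit.
                limit_reached = detector.push_execution(execution)
                try:
                    if limit_reached:
                        result = default
                    else:
                        result = func(execution, **kwargs)
                finally:
                    detector.pop_execution()
                return result
            return wrapper
        return decorator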


@@ -51,19 +51,20 @@ from jedi import debug
from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
from jedi.evaluate import compiled
from jedi.evaluate import recursion
from jedi.evaluate import iterable
from jedi.evaluate import docstrings
from jedi.evaluate import pep0484
from jedi.evaluate import param
from jedi.evaluate import flow_analysis
from jedi.evaluate import imports
from jedi.evaluate import helpers
from jedi.evaluate import iterable
from jedi.evaluate.filters import ParserTreeFilter, FunctionExecutionFilter, \
GlobalNameFilter, DictFilter, ContextName, AbstractNameDefinition, \
ParamName, AnonymousInstanceParamName, TreeNameDefinition, \
ContextNameMixin
from jedi.evaluate import context
from jedi.evaluate.context import ContextualizedNode
from jedi.evaluate.context import ContextualizedNode, NO_CONTEXTS, \
ContextSet, iterator_to_context_set
from jedi import parser_utils
from jedi.evaluate.parser_cache import get_yield_exprs
@@ -83,12 +84,13 @@ class ClassName(TreeNameDefinition):
super(ClassName, self).__init__(parent_context, tree_name)
self._name_context = name_context
@iterator_to_context_set
def infer(self):
# TODO this _name_to_types might get refactored and be a part of the
# parent class. Once it is, we can probably just overwrite method to
# achieve this.
from jedi.evaluate.finder import _name_to_types
inferred = _name_to_types(
from jedi.evaluate.syntax_tree import tree_name_to_contexts
inferred = tree_name_to_contexts(
self.parent_context.evaluator, self._name_context, self.tree_name)
for result_context in inferred:
@@ -111,7 +113,7 @@ class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)):
"""
api_type = 'class'
def __init__(self, evaluator, classdef, parent_context):
def __init__(self, evaluator, parent_context, classdef):
super(ClassContext, self).__init__(evaluator, parent_context=parent_context)
self.tree_node = classdef
@@ -162,7 +164,7 @@ class ClassContext(use_metaclass(CachedMetaClass, context.TreeContext)):
def py__call__(self, params):
from jedi.evaluate.instance import TreeInstance
return set([TreeInstance(self.evaluator, self.parent_context, self, params)])
return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params))
def py__class__(self):
return compiled.create(self.evaluator, type)
@@ -227,7 +229,7 @@ class LambdaName(AbstractNameDefinition):
return self._lambda_context.tree_node.start_pos
def infer(self):
return set([self._lambda_context])
return ContextSet(self._lambda_context)
class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext)):
@@ -260,7 +262,7 @@ class FunctionContext(use_metaclass(CachedMetaClass, context.TreeContext)):
"""
yield_exprs = get_yield_exprs(self.evaluator, self.tree_node)
if yield_exprs:
return set([iterable.Generator(self.evaluator, function_execution)])
return ContextSet(iterable.Generator(self.evaluator, function_execution))
else:
return function_execution.get_return_values()
@@ -312,7 +314,7 @@ class FunctionExecutionContext(context.TreeContext):
self.tree_node = function_context.tree_node
self.var_args = var_args
@evaluator_method_cache(default=set())
@evaluator_method_cache(default=NO_CONTEXTS)
@recursion.execution_recursion_decorator()
def get_return_values(self, check_yields=False):
funcdef = self.tree_node
@@ -320,12 +322,12 @@ class FunctionExecutionContext(context.TreeContext):
return self.evaluator.eval_element(self, funcdef.children[-1])
if check_yields:
types = set()
context_set = NO_CONTEXTS
returns = get_yield_exprs(self.evaluator, funcdef)
else:
returns = funcdef.iter_return_stmts()
types = set(docstrings.infer_return_types(self.function_context))
types |= set(pep0484.infer_return_types(self.function_context))
context_set = docstrings.infer_return_types(self.function_context)
context_set |= pep0484.infer_return_types(self.function_context)
for r in returns:
check = flow_analysis.reachability_check(self, funcdef, r)
@@ -333,18 +335,21 @@ class FunctionExecutionContext(context.TreeContext):
debug.dbg('Return unreachable: %s', r)
else:
if check_yields:
types |= set(self._eval_yield(r))
context_set |= ContextSet.from_sets(
lazy_context.infer()
for lazy_context in self._eval_yield(r)
)
else:
try:
children = r.children
except AttributeError:
types.add(compiled.create(self.evaluator, None))
context_set |= ContextSet(compiled.create(self.evaluator, None))
else:
types |= self.eval_node(children[1])
context_set |= self.eval_node(children[1])
if check is flow_analysis.REACHABLE:
debug.dbg('Return reachable: %s', r)
break
return types
return context_set
def _eval_yield(self, yield_expr):
if yield_expr.type == 'keyword':
@@ -355,7 +360,7 @@ class FunctionExecutionContext(context.TreeContext):
node = yield_expr.children[1]
if node.type == 'yield_arg': # It must be a yield from.
cn = ContextualizedNode(self, node.children[1])
for lazy_context in iterable.py__iter__(self.evaluator, cn.infer(), cn):
for lazy_context in cn.infer().iterate(cn):
yield lazy_context
else:
yield context.LazyTreeContext(self, node)
@@ -386,11 +391,10 @@ class FunctionExecutionContext(context.TreeContext):
else:
types = self.get_return_values(check_yields=True)
if types:
yield context.get_merged_lazy_context(list(types))
yield context.LazyKnownContexts(types)
return
last_for_stmt = for_stmt
evaluator = self.evaluator
for for_stmt, yields in yields_order:
if for_stmt is None:
# No for_stmt, just normal yields.
@@ -400,7 +404,7 @@ class FunctionExecutionContext(context.TreeContext):
else:
input_node = for_stmt.get_testlist()
cn = ContextualizedNode(self, input_node)
ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
ordered = cn.infer().iterate(cn)
ordered = list(ordered)
for lazy_context in ordered:
dct = {str(for_stmt.children[1].value): lazy_context.infer()}
@@ -430,9 +434,7 @@ class ModuleAttributeName(AbstractNameDefinition):
self.string_name = string_name
def infer(self):
return compiled.create(self.parent_context.evaluator, str).execute(
param.ValuesArguments([])
)
return compiled.create(self.parent_context.evaluator, str).execute_evaluated()
class ModuleName(ContextNameMixin, AbstractNameDefinition):
@@ -628,7 +630,7 @@ class ImplicitNSName(AbstractNameDefinition):
self.string_name = string_name
def infer(self):
return []
return NO_CONTEXTS
def get_root_context(self):
return self.implicit_ns_context


@@ -12,7 +12,6 @@ compiled module that returns the types for C-builtins.
import collections
import re
from jedi.common import unite
from jedi.evaluate import compiled
from jedi.evaluate import representation as er
from jedi.evaluate.instance import InstanceFunctionExecution, \
@@ -20,10 +19,11 @@ from jedi.evaluate.instance import InstanceFunctionExecution, \
AnonymousInstanceFunctionExecution
from jedi.evaluate import iterable
from jedi import debug
from jedi.evaluate import precedence
from jedi.evaluate import param
from jedi.evaluate import analysis
from jedi.evaluate.context import LazyTreeContext, ContextualizedNode
from jedi.evaluate.context import LazyTreeContext, ContextualizedNode, \
NO_CONTEXTS, ContextSet
from jedi.evaluate.syntax_tree import is_string
# Now this is all part of fake tuples in Jedi. However super doesn't work on
# __init__ and __new__ doesn't work at all. So adding this to nametuples is
@@ -77,7 +77,7 @@ def _follow_param(evaluator, arguments, index):
try:
key, lazy_context = list(arguments.unpack())[index]
except IndexError:
return set()
return NO_CONTEXTS
else:
return lazy_context.infer()
@@ -109,7 +109,7 @@ def argument_clinic(string, want_obj=False, want_context=False, want_arguments=F
try:
lst = list(arguments.eval_argument_clinic(clinic_args))
except ValueError:
return set()
return NO_CONTEXTS
else:
kwargs = {}
if want_context:
@@ -137,15 +137,16 @@ def builtins_next(evaluator, iterators, defaults):
else:
name = '__next__'
types = set()
context_set = NO_CONTEXTS
for iterator in iterators:
if isinstance(iterator, AbstractInstanceContext):
for filter in iterator.get_filters(include_self_names=True):
for n in filter.get(name):
for context in n.infer():
types |= context.execute_evaluated()
if types:
return types
context_set = ContextSet.from_sets(
n.infer()
for filter in iterator.get_filters(include_self_names=True)
for n in filter.get(name)
).execute_evaluated()
if context_set:
return context_set
return defaults
@@ -154,21 +155,21 @@ def builtins_getattr(evaluator, objects, names, defaults=None):
# follow the first param
for obj in objects:
for name in names:
if precedence.is_string(name):
if is_string(name):
return obj.py__getattribute__(name.obj)
else:
debug.warning('getattr called without str')
continue
return set()
return NO_CONTEXTS
@argument_clinic('object[, bases, dict], /')
def builtins_type(evaluator, objects, bases, dicts):
if bases or dicts:
# It's a type creation... maybe someday...
return set()
return NO_CONTEXTS
else:
return set([o.py__class__() for o in objects])
return objects.py__class__()
class SuperInstance(AbstractInstanceContext):
@@ -184,8 +185,8 @@ def builtins_super(evaluator, types, objects, context):
if isinstance(context, (InstanceFunctionExecution,
AnonymousInstanceFunctionExecution)):
su = context.instance.py__class__().py__bases__()
return unite(context.execute_evaluated() for context in su[0].infer())
return set()
return su[0].infer().execute_evaluated()
return NO_CONTEXTS
@argument_clinic('sequence, /', want_obj=True, want_arguments=True)
@@ -198,7 +199,7 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
if isinstance(lazy_context, LazyTreeContext):
# TODO access private
cn = ContextualizedNode(lazy_context._context, lazy_context.data)
ordered = list(iterable.py__iter__(evaluator, sequences, cn))
ordered = list(sequences.iterate(cn))
rev = list(reversed(ordered))
# Repack iterator values and then run it the normal way. This is
@@ -206,13 +207,13 @@ def builtins_reversed(evaluator, sequences, obj, arguments):
# would fail in certain cases like `reversed(x).__iter__` if we
# just returned the result directly.
seq = iterable.FakeSequence(evaluator, 'list', rev)
arguments = param.ValuesArguments([[seq]])
return set([CompiledInstance(evaluator, evaluator.BUILTINS, obj, arguments)])
arguments = param.ValuesArguments([ContextSet(seq)])
return ContextSet(CompiledInstance(evaluator, evaluator.BUILTINS, obj, arguments))
@argument_clinic('obj, type, /', want_arguments=True)
def builtins_isinstance(evaluator, objects, types, arguments):
bool_results = set([])
bool_results = set()
for o in objects:
try:
mro_func = o.py__class__().py__mro__
@@ -220,7 +221,7 @@ def builtins_isinstance(evaluator, objects, types, arguments):
# This is temporary. Everything should have a class attribute in
# Python?! Maybe we'll leave it here, because some numpy objects or
# whatever might not.
return set([compiled.create(True), compiled.create(False)])
return ContextSet(compiled.create(evaluator, True), compiled.create(evaluator, False))
mro = mro_func()
@@ -230,9 +231,9 @@ def builtins_isinstance(evaluator, objects, types, arguments):
elif cls_or_tup.name.string_name == 'tuple' \
and cls_or_tup.get_root_context() == evaluator.BUILTINS:
# Check for tuples.
classes = unite(
classes = ContextSet.from_sets(
lazy_context.infer()
for lazy_context in cls_or_tup.py__iter__()
for lazy_context in cls_or_tup.iterate()
)
bool_results.add(any(cls in mro for cls in classes))
else:
@@ -244,7 +245,7 @@ def builtins_isinstance(evaluator, objects, types, arguments):
'not %s.' % cls_or_tup
analysis.add(lazy_context._context, 'type-error-isinstance', node, message)
return set(compiled.create(evaluator, x) for x in bool_results)
return ContextSet.from_iterable(compiled.create(evaluator, x) for x in bool_results)
def collections_namedtuple(evaluator, obj, arguments):
@@ -259,7 +260,7 @@ def collections_namedtuple(evaluator, obj, arguments):
"""
# Namedtuples are not supported on Python 2.6
if not hasattr(collections, '_class_template'):
return set()
return NO_CONTEXTS
# Process arguments
# TODO here we only use one of the types, we should use all.
@@ -274,7 +275,7 @@ def collections_namedtuple(evaluator, obj, arguments):
for v in lazy_context.infer() if hasattr(v, 'obj')
]
else:
return set()
return NO_CONTEXTS
base = collections._class_template
base += _NAMEDTUPLE_INIT
@@ -293,7 +294,7 @@ def collections_namedtuple(evaluator, obj, arguments):
module = evaluator.grammar.parse(source)
generated_class = next(module.iter_classdefs())
parent_context = er.ModuleContext(evaluator, module, '')
return set([er.ClassContext(evaluator, generated_class, parent_context)])
return ContextSet(er.ClassContext(evaluator, parent_context, generated_class))
@argument_clinic('first, /')
@@ -314,8 +315,8 @@ _implemented = {
'deepcopy': _return_first_param,
},
'json': {
'load': lambda *args: set(),
'loads': lambda *args: set(),
'load': lambda *args: NO_CONTEXTS,
'loads': lambda *args: NO_CONTEXTS,
},
'collections': {
'namedtuple': collections_namedtuple,


@@ -0,0 +1,588 @@
"""
Functions evaluating the syntax tree.
"""
import copy
import operator as op
from parso.python import tree
from jedi import debug
from jedi import parser_utils
from jedi.evaluate.context import ContextSet, NO_CONTEXTS, ContextualizedNode, \
ContextualizedName, iterator_to_context_set, iterate_contexts
from jedi.evaluate import compiled
from jedi.evaluate import pep0484
from jedi.evaluate import recursion
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import iterable
from jedi.evaluate import imports
from jedi.evaluate import param
from jedi.evaluate import representation as er
from jedi.evaluate.instance import TreeInstance, CompiledInstance
from jedi.evaluate.finder import NameFinder
from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled
def _limit_context_infers(func):
"""
For now, this is how we keep type inference from going wild. There are
other ways to ensure recursion limits as well. This is mostly necessary
because of instance (self) access, which can be quite tricky to limit.
I'm still not sure this is the way to go, but it looks okay for now and we
can still go another way in the future. Tests are there. ~ dave
"""
def wrapper(context, *args, **kwargs):
n = context.tree_node
evaluator = context.evaluator
try:
evaluator.inferred_element_counts[n] += 1
if evaluator.inferred_element_counts[n] > 300:
debug.warning('In context %s there were too many inferences.', n)
return NO_CONTEXTS
except KeyError:
evaluator.inferred_element_counts[n] = 1
return func(context, *args, **kwargs)
return wrapper
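Stripped of jedi specifics, `_limit_context_infers` is a per-node counting decorator. A self-contained demo of the same pattern, with a hypothetical limit of 3 instead of 300:

    counts = {}

    def limit_infers(func, limit=3):
        def wrapper(node):
            counts[node] = counts.get(node, 0) + 1
            if counts[node] > limit:
                return frozenset()  # stands in for NO_CONTEXTS
            return func(node)
        return wrapper

    infer = limit_infers(lambda node: {node})
    assert infer('n') == {'n'}        # calls 1-3 pass through
    infer('n'); infer('n')
    assert infer('n') == frozenset()  # call 4 is cut off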
@debug.increase_indent
@_limit_context_infers
def eval_node(context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
evaluator = context.evaluator
typ = element.type
if typ in ('name', 'number', 'string', 'atom'):
return eval_atom(context, element)
elif typ == 'keyword':
# For False/True/None
if element.value in ('False', 'True', 'None'):
return ContextSet(compiled.builtin_from_name(evaluator, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
return NO_CONTEXTS
elif typ == 'lambdef':
return ContextSet(er.FunctionContext(evaluator, context, element))
elif typ == 'expr_stmt':
return eval_expr_stmt(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
context_set = eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = evaluator.eval_element(context, element.children[2])
context_set = _eval_comparison(
evaluator,
context,
context_set,
trailer,
right
)
break
context_set = eval_trailer(context, context_set, trailer)
return context_set
return NO_CONTEXTS
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element))
elif typ in ('not_test', 'factor'):
context_set = context.eval_node(element.children[-1])
for operator in element.children[:-1]:
context_set = eval_factor(context_set, operator)
return context_set
elif typ == 'test':
# `x if foo else y` case.
return (context.eval_node(element.children[0]) |
context.eval_node(element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not evaluated.
# In Python 2 an ellipsis is coded as three separate dot tokens, not
# as a single three-dot token.
assert element.value in ('.', '...')
return ContextSet(compiled.create(evaluator, Ellipsis))
elif typ == 'dotted_name':
context_set = eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
context_set = context_set.py__getattribute__(next_name, name_context=context)
return context_set
elif typ == 'eval_input':
return eval_node(context, element.children[0])
elif typ == 'annassign':
return pep0484._evaluate_for_annotation(context, element.children[1])
else:
return eval_or_test(context, element)
def eval_trailer(context, base_contexts, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = ()
if trailer_op == '[':
trailer_op, node, _ = trailer.children
# TODO It's kind of stupid to cast this from a context set to a set.
foo = set(base_contexts)
# special case: PEP0484 typing module, see
# https://github.com/davidhalter/jedi/issues/663
result = ContextSet()
for typ in list(foo):
if isinstance(typ, (er.ClassContext, TreeInstance)):
typing_module_types = pep0484.py__getitem__(context, typ, node)
if typing_module_types is not None:
foo.remove(typ)
result |= typing_module_types
return result | base_contexts.get_item(
eval_subscript_list(context.evaluator, context, node),
ContextualizedNode(context, trailer)
)
else:
debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
if trailer_op == '.':
return base_contexts.py__getattribute__(
name_context=context,
name_or_str=node
)
else:
assert trailer_op == '('
arguments = param.TreeArguments(context.evaluator, context, node, trailer)
return base_contexts.execute(arguments)
def eval_atom(context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate an ``atom`` node (because it would have just one child). In that
case an atom might be a name or a literal as well.
"""
if atom.type == 'name':
# This is the first global lookup.
stmt = tree.search_ancestor(
atom, 'expr_stmt', 'lambdef'
) or atom
if stmt.type == 'lambdef':
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
string = parser_utils.safe_literal_eval(atom.value)
return ContextSet(compiled.create(context.evaluator, string))
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
context_set = eval_atom(context, c[0])
for string in c[1:]:
right = eval_atom(context, string)
context_set = _eval_comparison(context.evaluator, context, context_set, '+', right)
return context_set
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return context.eval_node(c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type == 'comp_for':
return ContextSet(iterable.Comprehension.from_atom(context.evaluator, context, atom))
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
context = iterable.DictLiteralContext(context.evaluator, context, atom)
else:
context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
return ContextSet(context)
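One detail worth noting in `eval_atom`: the `c[0].type == 'string'` branch folds adjacent string literals with `+`, so implicit concatenation is inferred as a single string. A hypothetical input it covers (analyzed statically, never executed):

    s = 'foo' 'bar'  # implicit concatenation; folded to one str 'foobar'
    s.upper          # completes as a str attribute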
@_limit_context_infers
def eval_expr_stmt(context, stmt, seek_name=None):
with recursion.execution_allowed(context.evaluator, stmt) as allowed:
if allowed or context.get_root_context() == context.evaluator.BUILTINS:
return _eval_expr_stmt(context, stmt, seek_name)
return NO_CONTEXTS
@debug.increase_indent
def _eval_expr_stmt(context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which contains the calls the statement makes. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
:param stmt: A `tree.ExprStmt`.
"""
debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
context_set = context.eval_node(rhs)
if seek_name:
c_node = ContextualizedName(context, seek_name)
context_set = check_tuple_assignments(context.evaluator, c_node, context_set)
first_operator = next(stmt.yield_operators(), None)
if first_operator not in ('=', None) and first_operator.type == 'operator':
# `=` is always the last character in aug assignments -> -1
operator = copy.copy(first_operator)
operator.value = operator.value[:-1]
name = stmt.get_defined_names()[0].value
left = context.py__getattribute__(
name, position=stmt.start_pos, search_global=True)
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
and parser_utils.for_stmt_defines_one_name(for_stmt):
# Iterate through the result and add the values; that's only
# possible in uncluttered for loops, because they are
# predictable. Also only do it if the variable is not a tuple.
node = for_stmt.get_testlist()
cn = ContextualizedNode(context, node)
ordered = list(cn.infer().iterate(cn))
for lazy_context in ordered:
dct = {for_stmt.children[1].value: lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = context.eval_node(rhs)
left = _eval_comparison(context.evaluator, context, left, operator, t)
context_set = left
else:
context_set = _eval_comparison(context.evaluator, context, left, operator, context_set)
debug.dbg('eval_expr_stmt result %s', context_set)
return context_set
def eval_or_test(context, or_test):
iterator = iter(or_test.children)
types = context.eval_node(next(iterator))
for operator in iterator:
right = next(iterator)
if operator.type == 'comp_op': # not in / is not
operator = ' '.join(c.value for c in operator.children)
# handle lazy evaluation of and/or here.
if operator in ('and', 'or'):
left_bools = set(left.py__bool__() for left in types)
if left_bools == set([True]):
if operator == 'and':
types = context.eval_node(right)
elif left_bools == set([False]):
if operator != 'and':
types = context.eval_node(right)
# Otherwise continue, because of uncertainty.
else:
types = _eval_comparison(context.evaluator, context, types, operator,
context.eval_node(right))
debug.dbg('eval_or_test types %s', types)
return types
@iterator_to_context_set
def eval_factor(context_set, operator):
"""
Calculates `+`, `-`, `~` and `not` prefixes.
"""
for context in context_set:
if operator == '-':
if is_number(context):
yield compiled.create(context.evaluator, -context.obj)
elif operator == 'not':
value = context.py__bool__()
if value is None: # Uncertainty.
return
yield compiled.create(context.evaluator, not value)
else:
yield context
# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
'==': op.eq,
'!=': op.ne,
'is': op.is_,
'is not': op.is_not,
'<': op.lt,
'<=': op.le,
'>': op.gt,
'>=': op.ge,
}
def _literals_to_types(evaluator, result):
# Changes literals ('a', 1, 1.0, etc) to their type instances (str(),
# int(), float(), etc).
new_result = NO_CONTEXTS
for typ in result:
if is_literal(typ):
# Literals are only valid as long as the operations are
# correct. Otherwise add a value-free instance.
cls = compiled.builtin_from_name(evaluator, typ.name.string_name)
new_result |= cls.execute_evaluated()
else:
new_result |= ContextSet(typ)
return new_result
def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
if not left_contexts or not right_contexts:
# illegal slices e.g. cause left/right_result to be None
result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
return _literals_to_types(evaluator, result)
else:
# I don't think there's a reasonable chance that a string
# operation is still correct, once we pass something like six
# objects.
if len(left_contexts) * len(right_contexts) > 6:
return _literals_to_types(evaluator, left_contexts | right_contexts)
else:
return ContextSet.from_sets(
_eval_comparison_part(evaluator, context, left, operator, right)
for left in left_contexts
for right in right_contexts
)
def _is_tuple(context):
return isinstance(context, iterable.AbstractSequence) and context.array_type == 'tuple'
def _is_list(context):
return isinstance(context, iterable.AbstractSequence) and context.array_type == 'list'
def _eval_comparison_part(evaluator, context, left, operator, right):
l_is_num = is_number(left)
r_is_num = is_number(right)
if operator == '*':
# for iterables, ignore * operations
if isinstance(left, iterable.AbstractSequence) or is_string(left):
return ContextSet(left)
elif isinstance(right, iterable.AbstractSequence) or is_string(right):
return ContextSet(right)
elif operator == '+':
if l_is_num and r_is_num or is_string(left) and is_string(right):
return ContextSet(compiled.create(evaluator, left.obj + right.obj))
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return ContextSet(iterable.MergedArray(evaluator, (left, right)))
elif operator == '-':
if l_is_num and r_is_num:
return ContextSet(compiled.create(evaluator, left.obj - right.obj))
elif operator == '%':
# With strings and numbers the left type typically remains. Except for
# `int() % float()`.
return ContextSet(left)
elif operator in COMPARISON_OPERATORS:
operation = COMPARISON_OPERATORS[operator]
if is_compiled(left) and is_compiled(right):
# Possible, because the return is not an option. Just compare.
left = left.obj
right = right.obj
try:
result = operation(left, right)
except TypeError:
# Could be True or False.
return ContextSet(compiled.create(evaluator, True), compiled.create(evaluator, False))
else:
return ContextSet(compiled.create(evaluator, result))
elif operator == 'in':
return NO_CONTEXTS
def check(obj):
"""Checks if a Jedi object is either a float or an int."""
return isinstance(obj, CompiledInstance) and \
obj.name.string_name in ('int', 'float')
# Static analysis, one is a number, the other one is not.
if operator in ('+', '-') and l_is_num != r_is_num \
and not (check(left) or check(right)):
message = "TypeError: unsupported operand type(s) for +: %s and %s"
analysis.add(context, 'type-error-operation', operator,
message % (left, right))
return ContextSet(left, right)
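Concretely, when both operands are compiled objects the comparison branch unwraps `.obj` and computes the result directly, falling back to both booleans on `TypeError`. A self-contained trace of that folding:

    import operator as op

    TABLE = {'<': op.lt, '==': op.eq}

    def fold(left, right, operator):
        try:
            return {TABLE[operator](left, right)}  # one definite result
        except TypeError:
            return {True, False}                   # uncertain: either bool

    assert fold(1, 2, '<') == {True}
    assert fold(1, 'a', '<') == {True, False}  # Py3: unorderable types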
def _remove_statements(evaluator, context, stmt, name):
"""
This is the part where statements are being stripped.
Due to lazy evaluation, statements like a = func; b = a; b() have to be
evaluated.
"""
pep0484_contexts = \
pep0484.find_type_from_comment_hint_assign(context, stmt, name)
if pep0484_contexts:
return pep0484_contexts
return eval_expr_stmt(context, stmt, seek_name=name)
def tree_name_to_contexts(evaluator, context, tree_name):
types = []
node = tree_name.get_definition(import_name_always=True)
if node is None:
node = tree_name.parent
if node.type == 'global_stmt':
context = evaluator.create_context(context, tree_name)
finder = NameFinder(evaluator, context, context, tree_name.value)
filters = finder.get_filters(search_global=True)
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filters = [next(filters)]
return finder.find(filters, attribute_lookup=False)
elif node.type not in ('import_from', 'import_name'):
raise ValueError("Should not happen.")
typ = node.type
if typ == 'for_stmt':
types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
if types:
return types
if typ == 'with_stmt':
types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
if types:
return types
if typ in ('for_stmt', 'comp_for'):
try:
types = context.predefined_names[node][tree_name.value]
except KeyError:
cn = ContextualizedNode(context, node.children[3])
for_types = iterate_contexts(cn.infer(), cn)
c_node = ContextualizedName(context, tree_name)
types = check_tuple_assignments(evaluator, c_node, for_types)
elif typ == 'expr_stmt':
types = _remove_statements(evaluator, context, node, tree_name)
elif typ == 'with_stmt':
context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
enter_methods = context_managers.py__getattribute__('__enter__')
return enter_methods.execute_evaluated()
elif typ in ('import_from', 'import_name'):
types = imports.infer_import(context, tree_name)
elif typ in ('funcdef', 'classdef'):
types = _apply_decorators(context, node)
elif typ == 'try_stmt':
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
types = exceptions.execute_evaluated()
else:
raise ValueError("Should not happen.")
return types
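To make the dispatch concrete: the `with_stmt` branch above is what resolves a context-manager target through `__enter__`. A hypothetical snippet it serves (analyzed statically, never executed):

    with open('data.txt') as f:
        f.read  # `f` is inferred as the result of __enter__()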
def _apply_decorators(context, node):
"""
Returns the function that should be executed in the end.
This is also the place where the decorators are processed.
"""
if node.type == 'classdef':
decoratee_context = er.ClassContext(
context.evaluator,
parent_context=context,
classdef=node
)
else:
decoratee_context = er.FunctionContext(
context.evaluator,
parent_context=context,
funcdef=node
)
initial = values = ContextSet(decoratee_context)
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values)
dec_values = context.eval_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and evaluate it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = eval_trailer(context, dec_values, trailer)
if not len(dec_values):
debug.warning('decorator not found: %s on %s', dec, node)
return initial
values = dec_values.execute(param.ValuesArguments([values]))
if not len(values):
debug.warning('not possible to resolve wrappers found %s', node)
return initial
debug.dbg('decorator end %s', values)
return values
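The `reversed` loop applies decorators bottom-up, executing each decorator with the previously wrapped values and falling back to the undecorated context whenever resolution fails. A hypothetical target it handles:

    def deco(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    @deco
    def f():
        return 1

    f()  # resolved through `deco`, i.e. through `wrapper`, back to `f`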
def check_tuple_assignments(evaluator, contextualized_name, context_set):
"""
Checks if tuples are assigned.
"""
lazy_context = None
for index, node in contextualized_name.assignment_indexes():
cn = ContextualizedNode(contextualized_name.context, node)
iterated = context_set.iterate(cn)
for _ in range(index + 1):
try:
lazy_context = next(iterated)
except StopIteration:
# We could do this with the default param in next. But this
# would allow this loop to run for a very long time if the
# index number is high. Therefore bail out if the iterator is
# exhausted.
return ContextSet()
context_set = lazy_context.infer()
return context_set
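Each `(index, node)` pair from `assignment_indexes()` peels one level of unpacking: the current context set is iterated and the lazy value at that index becomes the new set. Hypothetical code it covers:

    x, (y, z) = 1, ('a', 2.0)
    # x: index 0         -> int
    # y: index 1, then 0 -> str
    # z: index 1, then 1 -> float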
def eval_subscript_list(evaluator, context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return ContextSet(iterable.Slice(context, None, None, None))
elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation, except for Python 2's
# Ellipsis.
# e.g. array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return ContextSet(iterable.Slice(context, *result))
# No slices
return context.eval_node(index)


@@ -10,13 +10,13 @@ from jedi.evaluate.compiled import CompiledObject
from jedi.evaluate.context import ContextualizedNode
from jedi import settings
from jedi import debug
from jedi import common
from jedi.evaluate.utils import ignored
def get_venv_path(venv):
"""Get sys.path for specified virtual environment."""
sys_path = _get_venv_path_dirs(venv)
with common.ignored(ValueError):
with ignored(ValueError):
sys_path.remove('')
sys_path = _get_sys_path_with_egglinks(sys_path)
# As of now, get_venv_path_dirs does not scan built-in pythonpath and
@@ -120,10 +120,9 @@ def _paths_from_assignment(module_context, expr_stmt):
except AssertionError:
continue
from jedi.evaluate.iterable import py__iter__
from jedi.evaluate.precedence import is_string
from jedi.evaluate.syntax_tree import is_string
cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
for lazy_context in py__iter__(module_context.evaluator, cn.infer(), cn):
for lazy_context in cn.infer().iterate(cn):
for context in lazy_context.infer():
if is_string(context):
yield context.obj
@@ -194,7 +193,7 @@ def sys_path_with_modifications(evaluator, module_context):
curdir = os.path.abspath(os.curdir)
#TODO why do we need a chdir?
with common.ignored(OSError):
with ignored(OSError):
os.chdir(os.path.dirname(path))
buildout_script_paths = set()
@@ -246,7 +245,7 @@ def _detect_django_path(module_path):
result = []
for parent in traverse_parents(module_path):
with common.ignored(IOError):
with ignored(IOError):
with open(parent + os.path.sep + 'manage.py'):
debug.dbg('Found django path: %s', module_path)
result.append(parent)


@@ -4,7 +4,17 @@ import contextlib
import functools
from jedi._compatibility import reraise
from jedi import settings
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
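Both helpers are tiny and purely mechanical; given the definitions above:

    @to_list
    def evens(n):
        for i in range(n):
            if i % 2 == 0:
                yield i

    assert evens(5) == [0, 2, 4]
    assert unite([{1, 2}, {2, 3}]) == {1, 2, 3}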
class UncaughtAttributeError(Exception):
@@ -78,16 +88,6 @@ class PushBackIterator(object):
return self.current
def indent_block(text, indention=' '):
"""This function indents a text block with a default of four spaces."""
temp = ''
while text and text[-1] == '\n':
temp += text[-1]
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
@contextlib.contextmanager
def ignored(*exceptions):
"""
@@ -100,12 +100,11 @@ def ignored(*exceptions):
pass
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def indent_block(text, indention=' '):
"""This function indents a text block with a default of four spaces."""
temp = ''
while text and text[-1] == '\n':
temp += text[-1]
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
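Trailing newlines are split off first and re-appended unindented, so, given the definition above:

    assert indent_block('a\nb\n') == '    a\n    b\n'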


@@ -14,7 +14,6 @@ following functions (sometimes bug-prone):
"""
import difflib
from jedi import common
from parso import python_bytes_to_unicode, split_lines
from jedi.evaluate import helpers
@@ -165,38 +164,37 @@ def inline(script):
dct = {}
definitions = script.goto_assignments()
with common.ignored(AssertionError):
assert len(definitions) == 1
stmt = definitions[0]._definition
usages = script.usages()
inlines = [r for r in usages
if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos]
inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column),
reverse=True)
expression_list = stmt.expression_list()
# don't allow multiline refactorings for now.
assert stmt.start_pos[0] == stmt.end_pos[0]
index = stmt.start_pos[0] - 1
assert len(definitions) == 1
stmt = definitions[0]._definition
usages = script.usages()
inlines = [r for r in usages
if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos]
inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column),
reverse=True)
expression_list = stmt.expression_list()
# don't allow multiline refactorings for now.
assert stmt.start_pos[0] == stmt.end_pos[0]
index = stmt.start_pos[0] - 1
line = new_lines[index]
replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
replace_str = replace_str.strip()
# tuples need parentheses
if expression_list and isinstance(expression_list[0], pr.Array):
arr = expression_list[0]
if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
replace_str = '(%s)' % replace_str
line = new_lines[index]
replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
replace_str = replace_str.strip()
# tuples need parentheses
if expression_list and isinstance(expression_list[0], pr.Array):
arr = expression_list[0]
if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
replace_str = '(%s)' % replace_str
# if it's the only assignment, remove the statement
if len(stmt.get_defined_names()) == 1:
line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]
# if it's the only assignment, remove the statement
if len(stmt.get_defined_names()) == 1:
line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]
dct = _rename(inlines, replace_str)
# remove the empty line
new_lines = dct[script.path][2]
if line.strip():
new_lines[index] = line
else:
new_lines.pop(index)
dct = _rename(inlines, replace_str)
# remove the empty line
new_lines = dct[script.path][2]
if line.strip():
new_lines[index] = line
else:
new_lines.pop(index)
return Refactoring(dct)


@@ -95,4 +95,4 @@ def test_time_docstring():
def test_dict_values():
assert Script('import sys/sys.modules["alshdb;lasdhf"]').goto_definitions()
assert Script('import sys\nsys.modules["alshdb;lasdhf"]').goto_definitions()