Evaluator -> InferState
@@ -131,7 +131,7 @@ New APIs:

 - The import logic has been rewritten to look more like Python's. There is now
-  an ``Evaluator.modules`` import cache, which resembles ``sys.modules``.
+  an ``InferState.modules`` import cache, which resembles ``sys.modules``.
 - Integrated the parser of 2to3. This will make refactoring possible. It will
   also be possible to check for error messages (like compiling an AST would give)
   in the future.
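Like ``sys.modules``, the module cache named in this entry maps import names to already-inferred modules, so each module is loaded at most once per inference state. A minimal sketch of the idea (hypothetical names, not jedi's actual code):

    class ModuleCache(object):
        # Hypothetical sketch: keyed by tuples of dotted names, e.g.
        # ('os', 'path'), the way sys.modules is keyed by 'os.path'.
        def __init__(self):
            self._dict = {}

        def add(self, string_names, module_set):
            self._dict[string_names] = module_set

        def get(self, string_names):
            # A KeyError here means the import logic has to run and
            # register its result via add().
            return self._dict[string_names]

The ``Script._get_module`` hunk below registers modules through exactly this kind of ``add()`` call, skipping ``builtins`` and ``typing`` so those essential modules are never overwritten.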
@@ -28,7 +28,7 @@ from jedi.api import helpers
 from jedi.api.completion import Completion
 from jedi.api.environment import InterpreterEnvironment
 from jedi.api.project import get_default_project, Project
-from jedi.inference import Evaluator
+from jedi.inference import InferState
 from jedi.inference import imports
 from jedi.inference import usages
 from jedi.inference.arguments import try_iter_content

@@ -111,11 +111,11 @@ class Script(object):
         # TODO deprecate and remove sys_path from the Script API.
         if sys_path is not None:
             project._sys_path = sys_path
-        self._evaluator = Evaluator(
+        self._infer_state = InferState(
             project, environment=environment, script_path=self.path
         )
         debug.speed('init')
-        self._module_node, source = self._evaluator.parse_and_get_code(
+        self._module_node, source = self._infer_state.parse_and_get_code(
             code=source,
             path=self.path,
             encoding=encoding,

@@ -156,7 +156,7 @@ class Script(object):
         is_package = False
         if self.path is not None:
             import_names, is_p = transform_path_to_dotted(
-                self._evaluator.get_sys_path(add_parent_paths=False),
+                self._infer_state.get_sys_path(add_parent_paths=False),
                 self.path
             )
             if import_names is not None:

@@ -170,7 +170,7 @@ class Script(object):
         if self.path is not None and self.path.endswith('.pyi'):
             # We are in a stub file. Try to load the stub properly.
             stub_module = load_proper_stub_module(
-                self._evaluator,
+                self._infer_state,
                 file_io,
                 names,
                 self._module_node

@@ -182,21 +182,21 @@ class Script(object):
             names = ('__main__',)

         module = ModuleContext(
-            self._evaluator, self._module_node, file_io,
+            self._infer_state, self._module_node, file_io,
             string_names=names,
             code_lines=self._code_lines,
             is_package=is_package,
         )
         if names[0] not in ('builtins', '__builtin__', 'typing'):
             # These modules are essential for Jedi, so don't overwrite them.
-            self._evaluator.module_cache.add(names, ContextSet([module]))
+            self._infer_state.module_cache.add(names, ContextSet([module]))
         return module

     def __repr__(self):
         return '<%s: %s %r>' % (
             self.__class__.__name__,
             repr(self._orig_path),
-            self._evaluator.environment,
+            self._infer_state.environment,
         )

     def completions(self):
@@ -209,7 +209,7 @@ class Script(object):
         """
         with debug.increase_indent_cm('completions'):
             completion = Completion(
-                self._evaluator, self._get_module(), self._code_lines,
+                self._infer_state, self._get_module(), self._code_lines,
                 self._pos, self.call_signatures
             )
             return completion.completions()

@@ -239,16 +239,16 @@ class Script(object):
         if leaf is None:
             return []

-        context = self._evaluator.create_context(self._get_module(), leaf)
+        context = self._infer_state.create_context(self._get_module(), leaf)

-        contexts = helpers.infer_goto_definition(self._evaluator, context, leaf)
+        contexts = helpers.infer_goto_definition(self._infer_state, context, leaf)
         contexts = convert_contexts(
             contexts,
             only_stubs=only_stubs,
             prefer_stubs=prefer_stubs,
         )

-        defs = [classes.Definition(self._evaluator, c.name) for c in contexts]
+        defs = [classes.Definition(self._infer_state, c.name) for c in contexts]
         # The additional set here allows the definitions to become unique in an
         # API sense. In the internals we want to separate more things than in
         # the API.

@@ -299,8 +299,8 @@ class Script(object):
             # Without a name we really just want to jump to the result e.g.
             # executed by `foo()`, if we the cursor is after `)`.
             return self.goto_definitions(only_stubs=only_stubs, prefer_stubs=prefer_stubs)
-        context = self._evaluator.create_context(self._get_module(), tree_name)
-        names = list(self._evaluator.goto(context, tree_name))
+        context = self._infer_state.create_context(self._get_module(), tree_name)
+        names = list(self._infer_state.goto(context, tree_name))

         if follow_imports:
             names = filter_follow_imports(names, lambda name: name.is_import())

@@ -310,7 +310,7 @@ class Script(object):
             prefer_stubs=prefer_stubs,
         )

-        defs = [classes.Definition(self._evaluator, d) for d in set(names)]
+        defs = [classes.Definition(self._infer_state, d) for d in set(names)]
         return helpers.sorted_definitions(defs)

     def usages(self, additional_module_paths=(), **kwargs):

@@ -342,7 +342,7 @@ class Script(object):

             names = usages.usages(self._get_module(), tree_name)

-            definitions = [classes.Definition(self._evaluator, n) for n in names]
+            definitions = [classes.Definition(self._infer_state, n) for n in names]
             if not include_builtins:
                 definitions = [d for d in definitions if not d.in_builtin_module()]
             return helpers.sorted_definitions(definitions)

@@ -368,12 +368,12 @@ class Script(object):
         if call_details is None:
             return []

-        context = self._evaluator.create_context(
+        context = self._infer_state.create_context(
             self._get_module(),
             call_details.bracket_leaf
         )
         definitions = helpers.cache_call_signatures(
-            self._evaluator,
+            self._infer_state,
             context,
             call_details.bracket_leaf,
             self._code_lines,

@@ -383,19 +383,19 @@ class Script(object):

         # TODO here we use stubs instead of the actual contexts. We should use
         # the signatures from stubs, but the actual contexts, probably?!
-        return [classes.CallSignature(self._evaluator, signature, call_details)
+        return [classes.CallSignature(self._infer_state, signature, call_details)
                 for signature in definitions.get_signatures()]

     def _analysis(self):
-        self._evaluator.is_analysis = True
-        self._evaluator.analysis_modules = [self._module_node]
+        self._infer_state.is_analysis = True
+        self._infer_state.analysis_modules = [self._module_node]
         module = self._get_module()
         try:
             for node in get_executable_nodes(self._module_node):
                 context = module.create_context(node)
                 if node.type in ('funcdef', 'classdef'):
                     # Resolve the decorators.
-                    tree_name_to_contexts(self._evaluator, context, node.children[1])
+                    tree_name_to_contexts(self._infer_state, context, node.children[1])
                 elif isinstance(node, tree.Import):
                     import_names = set(node.get_defined_names())
                     if node.is_nested():

@@ -409,16 +409,16 @@ class Script(object):
                         unpack_tuple_to_dict(context, types, testlist)
                 else:
                     if node.type == 'name':
-                        defs = self._evaluator.goto_definitions(context, node)
+                        defs = self._infer_state.goto_definitions(context, node)
                     else:
                         defs = infer_call_of_leaf(context, node)
                     try_iter_content(defs)
-                self._evaluator.reset_recursion_limitations()
+                self._infer_state.reset_recursion_limitations()

-            ana = [a for a in self._evaluator.analysis if self.path == a.path]
+            ana = [a for a in self._infer_state.analysis if self.path == a.path]
             return sorted(set(ana), key=lambda x: x.line)
         finally:
-            self._evaluator.is_analysis = False
+            self._infer_state.is_analysis = False


 class Interpreter(Script):

@@ -467,11 +467,11 @@ class Interpreter(Script):
         super(Interpreter, self).__init__(source, environment=environment,
                                           _project=Project(os.getcwd()), **kwds)
         self.namespaces = namespaces
-        self._evaluator.allow_descriptor_getattr = self._allow_descriptor_getattr_default
+        self._infer_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default

     def _get_module(self):
         return interpreter.MixedModuleContext(
-            self._evaluator,
+            self._infer_state,
             self._module_node,
             self.namespaces,
             file_io=KnownContentFileIO(self.path, self._code),

@@ -514,7 +514,7 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
     module_context = script._get_module()
     defs = [
         classes.Definition(
-            script._evaluator,
+            script._infer_state,
             create_name(name)
         ) for name in get_module_names(script._module_node, all_scopes)
    ]
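The ``_analysis`` hunk above is easiest to read as a loop that forces inference on every executable node while ``is_analysis`` is set, then collects the issues that accumulated on the state. A condensed, hedged sketch of that flow using the names from the diff:

    def run_analysis(infer_state, module_node, module, path):
        # Condensed sketch of the Script._analysis() flow shown above.
        infer_state.is_analysis = True
        infer_state.analysis_modules = [module_node]
        try:
            for node in get_executable_nodes(module_node):
                context = module.create_context(node)
                try_iter_content(context.infer_node(node))  # force inference
                infer_state.reset_recursion_limitations()
            # Keep only issues found in this file, deduplicated and sorted.
            ana = [a for a in infer_state.analysis if a.path == path]
            return sorted(set(ana), key=lambda issue: issue.line)
        finally:
            infer_state.is_analysis = False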
@@ -25,7 +25,7 @@ def _sort_names_by_start_pos(names):
     return sorted(names, key=lambda s: s.start_pos or (0, 0))


-def defined_names(evaluator, context):
+def defined_names(infer_state, context):
     """
     List sub-definitions (e.g., methods in class).

@@ -34,11 +34,11 @@ def defined_names(evaluator, context):
     """
     filter = next(context.get_filters(search_global=True))
     names = [name for name in filter.values()]
-    return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
+    return [Definition(infer_state, n) for n in _sort_names_by_start_pos(names)]


 def _contexts_to_definitions(contexts):
-    return [Definition(c.evaluator, c.name) for c in contexts]
+    return [Definition(c.infer_state, c.name) for c in contexts]


 class BaseDefinition(object):

@@ -62,8 +62,8 @@ class BaseDefinition(object):
         'argparse._ActionsContainer': 'argparse.ArgumentParser',
     }.items())

-    def __init__(self, evaluator, name):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, name):
+        self._infer_state = infer_state
         self._name = name
         """
         An instance of :class:`parso.python.tree.Name` subclass.

@@ -306,7 +306,7 @@ class BaseDefinition(object):
             only_stubs=only_stubs,
             prefer_stubs=prefer_stubs,
         )
-        return [self if n == self._name else Definition(self._evaluator, n)
+        return [self if n == self._name else Definition(self._infer_state, n)
                 for n in names]

     def infer(self, **kwargs):  # Python 2...

@@ -329,7 +329,7 @@ class BaseDefinition(object):
             prefer_stubs=prefer_stubs,
         )
         resulting_names = [c.name for c in contexts]
-        return [self if n == self._name else Definition(self._evaluator, n)
+        return [self if n == self._name else Definition(self._infer_state, n)
                 for n in resulting_names]

     @property

@@ -346,7 +346,7 @@ class BaseDefinition(object):
         for context in self._name.infer():
             for signature in context.get_signatures():
                 return [
-                    Definition(self._evaluator, n)
+                    Definition(self._infer_state, n)
                     for n in signature.get_param_names(resolve_stars=True)
                 ]

@@ -366,7 +366,7 @@ class BaseDefinition(object):

         if isinstance(context, FunctionExecutionContext):
             context = context.function_context
-        return Definition(self._evaluator, context.name)
+        return Definition(self._infer_state, context.name)

     def __repr__(self):
         return "<%s %sname=%r, description=%r>" % (

@@ -396,7 +396,7 @@ class BaseDefinition(object):
         return ''.join(lines[start_index:index + after + 1])

     def get_signatures(self):
-        return [Signature(self._evaluator, s) for s in self._name.infer().get_signatures()]
+        return [Signature(self._infer_state, s) for s in self._name.infer().get_signatures()]

     def execute(self):
         return _contexts_to_definitions(self._name.infer().execute_with_values())

@@ -407,8 +407,8 @@ class Completion(BaseDefinition):
     `Completion` objects are returned from :meth:`api.Script.completions`. They
     provide additional information about a completion.
     """
-    def __init__(self, evaluator, name, stack, like_name_length):
-        super(Completion, self).__init__(evaluator, name)
+    def __init__(self, infer_state, name, stack, like_name_length):
+        super(Completion, self).__init__(infer_state, name)

         self._like_name_length = like_name_length
         self._stack = stack

@@ -512,8 +512,8 @@ class Definition(BaseDefinition):
     *Definition* objects are returned from :meth:`api.Script.goto_assignments`
     or :meth:`api.Script.goto_definitions`.
     """
-    def __init__(self, evaluator, definition):
-        super(Definition, self).__init__(evaluator, definition)
+    def __init__(self, infer_state, definition):
+        super(Definition, self).__init__(infer_state, definition)

     @property
     def description(self):

@@ -588,7 +588,7 @@ class Definition(BaseDefinition):
         """
         defs = self._name.infer()
         return sorted(
-            unite(defined_names(self._evaluator, d) for d in defs),
+            unite(defined_names(self._infer_state, d) for d in defs),
             key=lambda s: s._name.start_pos or (0, 0)
         )

@@ -606,13 +606,13 @@ class Definition(BaseDefinition):
         return self._name.start_pos == other._name.start_pos \
             and self.module_path == other.module_path \
             and self.name == other.name \
-            and self._evaluator == other._evaluator
+            and self._infer_state == other._infer_state

     def __ne__(self, other):
         return not self.__eq__(other)

     def __hash__(self):
-        return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
+        return hash((self._name.start_pos, self.module_path, self.name, self._infer_state))


 class Signature(Definition):

@@ -621,8 +621,8 @@ class Signature(Definition):
     It knows what functions you are currently in. e.g. `isinstance(` would
     return the `isinstance` function. without `(` it would return nothing.
     """
-    def __init__(self, evaluator, signature):
-        super(Signature, self).__init__(evaluator, signature.name)
+    def __init__(self, infer_state, signature):
+        super(Signature, self).__init__(infer_state, signature.name)
         self._signature = signature

     @property

@@ -630,7 +630,7 @@ class Signature(Definition):
         """
         :return list of ParamDefinition:
         """
-        return [ParamDefinition(self._evaluator, n)
+        return [ParamDefinition(self._infer_state, n)
                 for n in self._signature.get_param_names(resolve_stars=True)]

     def to_string(self):

@@ -644,8 +644,8 @@ class CallSignature(Signature):
     return the `isinstance` function with its params. Without `(` it would
     return nothing.
     """
-    def __init__(self, evaluator, signature, call_details):
-        super(CallSignature, self).__init__(evaluator, signature)
+    def __init__(self, infer_state, signature, call_details):
+        super(CallSignature, self).__init__(infer_state, signature)
         self._call_details = call_details
         self._signature = signature

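Several hunks above wrap results in ``set(...)`` (``goto_definitions``, ``goto_assignments``) to deduplicate API objects. That only works because ``Definition.__eq__`` and ``__hash__`` agree on the same tuple -- position, module path, name, and now the owning ``InferState``. A small illustration with hypothetical instances:

    # Two hypothetical Definition objects for the same name in the same
    # InferState compare equal and hash equally, so set() collapses them.
    d1 = Definition(infer_state, name)
    d2 = Definition(infer_state, name)
    assert d1 == d2 and hash(d1) == hash(d2)
    assert len({d1, d2}) == 1    # this is what set(names) relies on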
@@ -28,7 +28,7 @@ def get_call_signature_param_names(call_signatures):
             yield p._name


-def filter_names(evaluator, completion_names, stack, like_name):
+def filter_names(infer_state, completion_names, stack, like_name):
     comp_dct = {}
     if settings.case_insensitive_completion:
         like_name = like_name.lower()

@@ -39,7 +39,7 @@ def filter_names(evaluator, completion_names, stack, like_name):

         if string.startswith(like_name):
             new = classes.Completion(
-                evaluator,
+                infer_state,
                 name,
                 stack,
                 len(like_name)

@@ -85,8 +85,8 @@ def get_flow_scope_node(module_node, position):


 class Completion:
-    def __init__(self, evaluator, module, code_lines, position, call_signatures_callback):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, module, code_lines, position, call_signatures_callback):
+        self._infer_state = infer_state
         self._module_context = module
         self._module_node = module.tree_node
         self._code_lines = code_lines

@@ -104,7 +104,7 @@ class Completion:
             string, start_leaf = _extract_string_while_in_string(leaf, self._position)
             if string is not None:
                 completions = list(file_name_completions(
-                    self._evaluator, self._module_context, start_leaf, string,
+                    self._infer_state, self._module_context, start_leaf, string,
                     self._like_name, self._call_signatures_callback,
                     self._code_lines, self._original_position
                 ))

@@ -113,7 +113,7 @@ class Completion:

         completion_names = self._get_context_completions(leaf)

-        completions = filter_names(self._evaluator, completion_names,
+        completions = filter_names(self._infer_state, completion_names,
                                    self.stack, self._like_name)

         return sorted(completions, key=lambda x: (x.name.startswith('__'),

@@ -135,7 +135,7 @@ class Completion:
         - In params (also lambda): no completion before =
         """

-        grammar = self._evaluator.grammar
+        grammar = self._infer_state.grammar
         self.stack = stack = None

         try:

@@ -234,14 +234,14 @@ class Completion:
     def _get_keyword_completion_names(self, allowed_transitions):
         for k in allowed_transitions:
             if isinstance(k, str) and k.isalpha():
-                yield keywords.KeywordName(self._evaluator, k)
+                yield keywords.KeywordName(self._infer_state, k)

     def _global_completions(self):
         context = get_user_scope(self._module_context, self._position)
         debug.dbg('global completion scope: %s', context)
         flow_scope_node = get_flow_scope_node(self._module_node, self._position)
         filters = get_global_filters(
-            self._evaluator,
+            self._infer_state,
             context,
             self._position,
             origin_scope=flow_scope_node

@@ -253,7 +253,7 @@ class Completion:

     def _trailer_completions(self, previous_leaf):
         user_context = get_user_scope(self._module_context, self._position)
-        inferred_context = self._evaluator.create_context(
+        inferred_context = self._infer_state.create_context(
             self._module_context, previous_leaf
         )
         contexts = infer_call_of_leaf(inferred_context, previous_leaf)

@@ -276,8 +276,8 @@ class Completion:

     def _get_importer_names(self, names, level=0, only_modules=True):
         names = [n.value for n in names]
-        i = imports.Importer(self._evaluator, names, self._module_context, level)
-        return i.completion_names(self._evaluator, only_modules=only_modules)
+        i = imports.Importer(self._infer_state, names, self._module_context, level)
+        return i.completion_names(self._infer_state, only_modules=only_modules)

     def _get_class_context_completions(self, is_function=True):
         """
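``filter_names`` above keeps completion names whose string starts with what the user already typed, lower-casing both sides when ``settings.case_insensitive_completion`` is on. A self-contained version of just that prefix filter (the real function also deduplicates via ``comp_dct`` and carries the parser stack):

    def filter_candidates(strings, like_name, case_insensitive=True):
        # Stand-alone sketch of the prefix match in filter_names().
        if case_insensitive:
            like_name = like_name.lower()
        for string in strings:
            candidate = string.lower() if case_insensitive else string
            if candidate.startswith(like_name):
                yield string

    # e.g. filter_candidates(['isinstance', 'issubclass', 'id'], 'IS')
    # yields 'isinstance' and 'issubclass'.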
@@ -11,7 +11,7 @@ from collections import namedtuple
 from jedi._compatibility import highest_pickle_protocol, which
 from jedi.cache import memoize_method, time_cache
 from jedi.inference.compiled.subprocess import CompiledSubprocess, \
-    EvaluatorSameProcess, EvaluatorSubprocess
+    InferStateSameProcess, InferStateSubprocess

 import parso

@@ -109,8 +109,8 @@ class Environment(_BaseEnvironment):
         version = '.'.join(str(i) for i in self.version_info)
         return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)

-    def get_evaluator_subprocess(self, evaluator):
-        return EvaluatorSubprocess(evaluator, self._get_subprocess())
+    def get_infer_state_subprocess(self, infer_state):
+        return InferStateSubprocess(infer_state, self._get_subprocess())

     @memoize_method
     def get_sys_path(self):

@@ -140,8 +140,8 @@ class SameEnvironment(_SameEnvironmentMixin, Environment):


 class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
-    def get_evaluator_subprocess(self, evaluator):
-        return EvaluatorSameProcess(evaluator)
+    def get_infer_state_subprocess(self, infer_state):
+        return InferStateSameProcess(infer_state)

     def get_sys_path(self):
         return sys.path

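The two environment hunks above are a strategy split: a regular ``Environment`` gives the inference state a subprocess-backed accessor (compiled objects get inspected in a separate Python process), while ``InterpreterEnvironment`` stays in-process. A hedged sketch of the dispatch, using the class names from the diff -- in jedi the choice is actually made by overriding ``get_infer_state_subprocess()`` on each class:

    def make_compiled_subprocess(environment, infer_state):
        # Sketch only; mirrors environment.get_infer_state_subprocess().
        if isinstance(environment, InterpreterEnvironment):
            return InferStateSameProcess(infer_state)       # same process
        return InferStateSubprocess(infer_state, environment._get_subprocess())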
@@ -7,7 +7,7 @@ from jedi.inference.helpers import get_str_or_none
 from jedi.parser_utils import get_string_quote


-def file_name_completions(evaluator, module_context, start_leaf, string,
+def file_name_completions(infer_state, module_context, start_leaf, string,
                           like_name, call_signatures_callback, code_lines, position):
     # First we want to find out what can actually be changed as a name.
     like_name_length = len(os.path.basename(string) + like_name)

@@ -30,7 +30,7 @@ def file_name_completions(evaluator, module_context, start_leaf, string,
             is_in_os_path_join = False
         else:
             string = to_be_added + string
-    base_path = os.path.join(evaluator.project._path, string)
+    base_path = os.path.join(infer_state.project._path, string)
     try:
         listed = os.listdir(base_path)
     except FileNotFoundError:

@@ -53,8 +53,8 @@ def file_name_completions(evaluator, module_context, start_leaf, string,
                 name += os.path.sep

             yield classes.Completion(
-                evaluator,
-                FileName(evaluator, name[len(must_start_with) - like_name_length:]),
+                infer_state,
+                FileName(infer_state, name[len(must_start_with) - like_name_length:]),
                 stack=None,
                 like_name_length=like_name_length
             )

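``file_name_completions`` above ultimately boils down to an ``os.listdir`` of the directory being typed into, with ``os.path.sep`` appended for directories. A self-contained, simplified version of that core (not jedi's exact logic, which also handles string quotes and ``os.path.join`` calls):

    import os

    def list_path_completions(project_dir, typed):
        # Simplified sketch of the listing step in file_name_completions().
        base_path = os.path.join(project_dir, typed)
        must_start_with = os.path.basename(typed)
        directory = os.path.dirname(base_path) if must_start_with else base_path
        try:
            listed = os.listdir(directory)
        except FileNotFoundError:
            return
        for name in listed:
            if name.startswith(must_start_with):
                if os.path.isdir(os.path.join(directory, name)):
                    name += os.path.sep   # as the diff does for directories
                yield name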
@@ -136,11 +136,11 @@ def get_stack_at_position(grammar, code_lines, leaf, pos):
     )


-def infer_goto_definition(evaluator, context, leaf):
+def infer_goto_definition(infer_state, context, leaf):
     if leaf.type == 'name':
         # In case of a name we can just use goto_definition which does all the
         # magic itself.
-        return evaluator.goto_definitions(context, leaf)
+        return infer_state.goto_definitions(context, leaf)

     parent = leaf.parent
     definitions = NO_CONTEXTS

@@ -154,7 +154,7 @@ def infer_goto_definition(evaluator, context, leaf):
         # e.g. `"foo"` or `1.0`
         return infer_atom(context, leaf)
     elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
-        return get_string_context_set(evaluator)
+        return get_string_context_set(infer_state)
     return definitions


@@ -376,7 +376,7 @@ def get_call_signature_details(module, position):


 @call_signature_time_cache("call_signatures_validity")
-def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos):
+def cache_call_signatures(infer_state, context, bracket_leaf, code_lines, user_pos):
     """This function calculates the cache key."""
     line_index = user_pos[0] - 1

@@ -391,7 +391,7 @@ def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos
     else:
         yield (module_path, before_bracket, bracket_leaf.start_pos)
     yield infer_goto_definition(
-        evaluator,
+        infer_state,
         context,
         bracket_leaf.get_previous_leaf(),
     )
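``cache_call_signatures`` above looks odd in isolation: it is a generator wrapped by ``@call_signature_time_cache``, and the protocol as used here is to yield the cache key first (module path, text before the bracket, bracket position -- or ``None`` for "don't cache") and the value second. A hedged sketch of such a decorator; the real one also expires entries after a validity period, which this omits:

    def key_then_value_cache(generator_func):
        # Sketch of the yield-key-then-value caching protocol.
        cache = {}
        def wrapper(*args, **kwargs):
            generator = generator_func(*args, **kwargs)
            key = next(generator)            # first yield: the cache key
            if key is not None and key in cache:
                return cache[key]
            value = next(generator)          # second yield: the value
            if key is not None:
                cache[key] = value
            return value
        return wrapper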
@@ -9,9 +9,9 @@ from jedi.inference.compiled.access import create_access_path
 from jedi.inference.base_context import ContextWrapper


-def _create(evaluator, obj):
+def _create(infer_state, obj):
     return compiled.create_from_access_path(
-        evaluator, create_access_path(evaluator, obj)
+        infer_state, create_access_path(infer_state, obj)
     )


@@ -23,9 +23,9 @@ class NamespaceObject(object):
 class MixedModuleContext(ContextWrapper):
     type = 'mixed_module'

-    def __init__(self, evaluator, tree_module, namespaces, file_io, code_lines):
+    def __init__(self, infer_state, tree_module, namespaces, file_io, code_lines):
         module_context = ModuleContext(
-            evaluator, tree_module,
+            infer_state, tree_module,
             file_io=file_io,
             string_names=('__main__',),
             code_lines=code_lines

@@ -38,7 +38,7 @@ class MixedModuleContext(ContextWrapper):
             yield filter

         for namespace_obj in self._namespace_objects:
-            compiled_object = _create(self.evaluator, namespace_obj)
+            compiled_object = _create(self.infer_state, namespace_obj)
             mixed_object = mixed.MixedObject(
                 compiled_object=compiled_object,
                 tree_context=self._wrapped_context

@@ -15,24 +15,24 @@ except ImportError:
     pydoc_topics = None


-def get_operator(evaluator, string, pos):
-    return Keyword(evaluator, string, pos)
+def get_operator(infer_state, string, pos):
+    return Keyword(infer_state, string, pos)


 class KeywordName(AbstractArbitraryName):
     api_type = u'keyword'

     def infer(self):
-        return [Keyword(self.evaluator, self.string_name, (0, 0))]
+        return [Keyword(self.infer_state, self.string_name, (0, 0))]


 class Keyword(object):
     api_type = u'keyword'

-    def __init__(self, evaluator, name, pos):
-        self.name = KeywordName(evaluator, name)
+    def __init__(self, infer_state, name, pos):
+        self.name = KeywordName(infer_state, name)
         self.start_pos = pos
-        self.parent = evaluator.builtins_module
+        self.parent = infer_state.builtins_module

     @property
     def names(self):

@@ -7,7 +7,7 @@ from jedi.api.environment import SameEnvironment, \
 from jedi.api.exceptions import WrongVersion
 from jedi._compatibility import force_unicode
 from jedi.inference.sys_path import discover_buildout_paths
-from jedi.inference.cache import evaluator_as_method_param_cache
+from jedi.inference.cache import infer_state_as_method_param_cache
 from jedi.common.utils import traverse_parents

 _CONFIG_FOLDER = '.jedi'

@@ -77,8 +77,8 @@ class Project(object):

         py2_comp(path, **kwargs)

-    @evaluator_as_method_param_cache()
-    def _get_base_sys_path(self, evaluator, environment=None):
+    @infer_state_as_method_param_cache()
+    def _get_base_sys_path(self, infer_state, environment=None):
         if self._sys_path is not None:
             return self._sys_path

@@ -93,8 +93,8 @@ class Project(object):
                 pass
         return sys_path

-    @evaluator_as_method_param_cache()
-    def _get_sys_path(self, evaluator, environment=None, add_parent_paths=True):
+    @infer_state_as_method_param_cache()
+    def _get_sys_path(self, infer_state, environment=None, add_parent_paths=True):
         """
         Keep this method private for all users of jedi. However internally this
         one is used like a public method.

@@ -102,15 +102,15 @@ class Project(object):
         suffixed = []
         prefixed = []

-        sys_path = list(self._get_base_sys_path(evaluator, environment))
+        sys_path = list(self._get_base_sys_path(infer_state, environment))
         if self._smart_sys_path:
             prefixed.append(self._path)

-            if evaluator.script_path is not None:
-                suffixed += discover_buildout_paths(evaluator, evaluator.script_path)
+            if infer_state.script_path is not None:
+                suffixed += discover_buildout_paths(infer_state, infer_state.script_path)

                 if add_parent_paths:
-                    traversed = list(traverse_parents(evaluator.script_path))
+                    traversed = list(traverse_parents(infer_state.script_path))

                     # AFAIK some libraries have imports like `foo.foo.bar`, which
                     # leads to the conclusion to by default prefer longer paths

@@ -1,6 +1,6 @@
 class BaseContext(object):
-    def __init__(self, evaluator, parent_context=None):
-        self.evaluator = evaluator
+    def __init__(self, infer_state, parent_context=None):
+        self.infer_state = infer_state
         self.parent_context = parent_context

     def get_root_context(self):

@@ -15,7 +15,7 @@ Type inference of Python code in |jedi| is based on three assumptions:
 The actual algorithm is based on a principle I call lazy type inference. That
 said, the typical entry point for static analysis is calling
 ``infer_expr_stmt``. There's separate logic for autocompletion in the API, the
-evaluator is all about inferring an expression.
+infer_state is all about inferring an expression.

 TODO this paragraph is not what jedi does anymore, it's similar, but not the
 same.

@@ -32,9 +32,9 @@ return the ``date`` class.

 To *visualize* this (simplified):

-- ``Evaluator.infer_expr_stmt`` doesn't do much, because there's no assignment.
+- ``InferState.infer_expr_stmt`` doesn't do much, because there's no assignment.
 - ``Context.infer_node`` cares for resolving the dotted path
-- ``Evaluator.find_types`` searches for global definitions of datetime, which
+- ``InferState.find_types`` searches for global definitions of datetime, which
   it finds in the definition of an import, by scanning the syntax tree.
 - Using the import logic, the datetime module is found.
 - Now ``find_types`` is called again by ``infer_node`` to find ``date``
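The bullet list above can be made concrete with a toy model (deliberately not jedi code): resolving ``datetime.date`` is one global lookup for the first name plus one attribute lookup per dot, and the result is the ``date`` class itself -- nothing is executed.

    # Toy model of the lazy dotted-path resolution described above.
    modules = {'datetime': {'date': '<class date>'}}

    def find_types(scope, name):
        # "find_types": scan the current scope for definitions of name.
        return [scope[name]] if name in scope else []

    def infer_dotted(path):
        first, rest = path.split('.')[0], path.split('.')[1:]
        contexts = find_types(modules, first)      # import logic, simplified
        for name in rest:                          # one lookup per trailer
            contexts = [t for c in contexts for t in find_types(c, name)]
        return contexts

    print(infer_dotted('datetime.date'))           # ['<class date>']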
@@ -72,7 +72,7 @@ from jedi import parser_utils
 from jedi.inference.utils import unite
 from jedi.inference import imports
 from jedi.inference import recursion
-from jedi.inference.cache import evaluator_function_cache
+from jedi.inference.cache import infer_state_function_cache
 from jedi.inference import helpers
 from jedi.inference.names import TreeNameDefinition, ParamName
 from jedi.inference.base_context import ContextualizedName, ContextualizedNode, \

@@ -85,13 +85,13 @@ from jedi.inference.syntax_tree import infer_trailer, infer_expr_stmt, \
 from jedi.plugins import plugin_manager


-class Evaluator(object):
+class InferState(object):
     def __init__(self, project, environment=None, script_path=None):
         if environment is None:
             environment = project.get_environment()
         self.environment = environment
         self.script_path = script_path
-        self.compiled_subprocess = environment.get_evaluator_subprocess(self)
+        self.compiled_subprocess = environment.get_infer_state_subprocess(self)
         self.grammar = environment.get_grammar()

         self.latest_grammar = parso.load_grammar(version='3.7')

@@ -128,7 +128,7 @@ class Evaluator(object):
         return context_set

     @property
-    @evaluator_function_cache()
+    @infer_state_function_cache()
     def builtins_module(self):
         module_name = u'builtins'
         if self.environment.version_info.major == 2:

@@ -137,7 +137,7 @@ class Evaluator(object):
         return builtins_module

     @property
-    @evaluator_function_cache()
+    @infer_state_function_cache()
     def typing_module(self):
         typing_module, = self.import_module((u'typing',))
         return typing_module

@@ -233,7 +233,7 @@ class Evaluator(object):
             return infer_node(context, element)
         return self._infer_element_cached(context, element)

-    @evaluator_function_cache(default=NO_CONTEXTS)
+    @infer_state_function_cache(default=NO_CONTEXTS)
     def _infer_element_cached(self, context, element):
         return infer_node(context, element)

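Three hunks above put ``@infer_state_function_cache()`` on ``builtins_module``, ``typing_module``, and ``_infer_element_cached``: results are memoized on the state's ``memoize_cache``, so each ``InferState`` loads builtins and typing at most once and never re-infers the same node. A hedged usage sketch, with signatures as shown in the diff:

    # One InferState per Script; the property below hits the import
    # machinery only on first access, then comes from memoize_cache.
    state = InferState(project, environment=environment, script_path=None)
    builtins_a = state.builtins_module   # imports the builtins module
    builtins_b = state.builtins_module   # cached - no second import
    assert builtins_a is builtins_b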
@@ -87,7 +87,7 @@ def add(node_context, error_name, node, message=None, typ=Error, payload=None):
     module_path = module_context.py__file__()
     issue_instance = typ(error_name, module_path, node.start_pos, message)
     debug.warning(str(issue_instance), format=False)
-    node_context.evaluator.analysis.append(issue_instance)
+    node_context.infer_state.analysis.append(issue_instance)
     return issue_instance


@@ -149,7 +149,7 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None)

         for python_cls in exception.mro():
             if cls.py__name__() == python_cls.__name__ \
-                    and cls.parent_context == cls.evaluator.builtins_module:
+                    and cls.parent_context == cls.infer_state.builtins_module:
                 return True
         return False

@@ -192,7 +192,7 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None)
            arglist = trailer.children[1]
            assert arglist.type == 'arglist'
            from jedi.inference.arguments import TreeArguments
-           args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack())
+           args = list(TreeArguments(node_context.infer_state, node_context, arglist).unpack())
            # Arguments should be very simple
            assert len(args) == 2

@@ -11,7 +11,7 @@ from jedi.inference.lazy_context import LazyKnownContext, LazyKnownContexts, \
 from jedi.inference.names import ParamName, TreeNameDefinition
 from jedi.inference.base_context import NO_CONTEXTS, ContextSet, ContextualizedNode
 from jedi.inference.context import iterable
-from jedi.inference.cache import evaluator_as_method_param_cache
+from jedi.inference.cache import infer_state_as_method_param_cache
 from jedi.inference.param import get_executed_params_and_issues, ExecutedParam


@@ -59,7 +59,7 @@ def repack_with_argument_clinic(string, keep_arguments_param=False, keep_callbac
             kwargs.pop('callback', None)
             try:
                 args += tuple(_iterate_argument_clinic(
-                    context.evaluator,
+                    context.infer_state,
                     arguments,
                     clinic_args
                 ))

@@ -72,7 +72,7 @@ def repack_with_argument_clinic(string, keep_arguments_param=False, keep_callbac
     return decorator


-def _iterate_argument_clinic(evaluator, arguments, parameters):
+def _iterate_argument_clinic(infer_state, arguments, parameters):
     """Uses a list with argument clinic information (see PEP 436)."""
     iterator = PushBackIterator(arguments.unpack())
     for i, (name, optional, allow_kwargs, stars) in enumerate(parameters):

@@ -84,7 +84,7 @@ def _iterate_argument_clinic(evaluator, arguments, parameters):
                    break

                lazy_contexts.append(argument)
-           yield ContextSet([iterable.FakeSequence(evaluator, u'tuple', lazy_contexts)])
+           yield ContextSet([iterable.FakeSequence(infer_state, u'tuple', lazy_contexts)])
            lazy_contexts
            continue
        elif stars == 2:

@@ -161,7 +161,7 @@ class AnonymousArguments(AbstractArguments):
     def get_executed_params_and_issues(self, execution_context):
         from jedi.inference.dynamic import search_params
         return search_params(
-            execution_context.evaluator,
+            execution_context.infer_state,
             execution_context,
             execution_context.tree_node
         ), []

@@ -198,17 +198,17 @@ def unpack_arglist(arglist):


 class TreeArguments(AbstractArguments):
-    def __init__(self, evaluator, context, argument_node, trailer=None):
+    def __init__(self, infer_state, context, argument_node, trailer=None):
         """
         :param argument_node: May be an argument_node or a list of nodes.
         """
         self.argument_node = argument_node
         self.context = context
-        self._evaluator = evaluator
+        self._infer_state = infer_state
         self.trailer = trailer  # Can be None, e.g. in a class definition.

     @classmethod
-    @evaluator_as_method_param_cache()
+    @infer_state_as_method_param_cache()
     def create_cached(cls, *args, **kwargs):
         return cls(*args, **kwargs)

@@ -241,7 +241,7 @@ class TreeArguments(AbstractArguments):
                 if sync_comp_for.type == 'comp_for':
                     sync_comp_for = sync_comp_for.children[1]
                 comp = iterable.GeneratorComprehension(
-                    self._evaluator,
+                    self._infer_state,
                     defining_context=self.context,
                     sync_comp_for_node=sync_comp_for,
                     entry_node=el.children[0],

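``_iterate_argument_clinic`` above consumes parameter descriptions in PEP 436 (Argument Clinic) style: tuples of ``(name, optional, allow_kwargs, stars)``. A self-contained toy matcher for that shape, ignoring the star-args handling the real function does:

    def iterate_clinic(arguments, parameters):
        # Toy matcher for (name, optional, allow_kwargs, stars) tuples.
        it = iter(arguments)
        for name, optional, allow_kwargs, stars in parameters:
            try:
                yield next(it)
            except StopIteration:
                if not optional:
                    raise TypeError('missing required argument: %s' % name)
                return

    args = list(iterate_clinic(
        ['value'],
        [('object', False, False, 0), ('default', True, False, 0)],
    ))
    # -> ['value']; the optional 'default' parameter is simply skipped.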
@@ -16,7 +16,7 @@ from jedi.parser_utils import clean_scope_docstring
 from jedi.common import BaseContextSet, BaseContext
 from jedi.inference.helpers import SimpleGetItemNotFound
 from jedi.inference.utils import safe_property
-from jedi.inference.cache import evaluator_as_method_param_cache
+from jedi.inference.cache import infer_state_as_method_param_cache
 from jedi.cache import memoize_method

 _sentinel = object()

@@ -31,17 +31,17 @@ class HelperContextMixin(object):
             context = context.parent_context

     @classmethod
-    @evaluator_as_method_param_cache()
+    @infer_state_as_method_param_cache()
     def create_cached(cls, *args, **kwargs):
         return cls(*args, **kwargs)

     def execute(self, arguments):
-        return self.evaluator.execute(self, arguments=arguments)
+        return self.infer_state.execute(self, arguments=arguments)

     def execute_with_values(self, *value_list):
         from jedi.inference.arguments import ValuesArguments
         arguments = ValuesArguments([ContextSet([value]) for value in value_list])
-        return self.evaluator.execute(self, arguments)
+        return self.infer_state.execute(self, arguments)

     def execute_annotation(self):
         return self.execute_with_values()

@@ -64,7 +64,7 @@ class HelperContextMixin(object):
         if name_context is None:
             name_context = self
         from jedi.inference import finder
-        f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
+        f = finder.NameFinder(self.infer_state, self, name_context, name_or_str,
                               position, analysis_errors=analysis_errors)
         filters = f.get_filters(search_global)
         if is_goto:

@@ -78,10 +78,10 @@ class HelperContextMixin(object):
         return await_context_set.execute_with_values()

     def infer_node(self, node):
-        return self.evaluator.infer_element(self, node)
+        return self.infer_state.infer_element(self, node)

     def create_context(self, node, node_is_context=False, node_is_object=False):
-        return self.evaluator.create_context(self, node, node_is_context, node_is_object)
+        return self.infer_state.create_context(self, node, node_is_context, node_is_object)

     def iterate(self, contextualized_node=None, is_async=False):
         debug.dbg('iterate %s', self)

@@ -236,8 +236,8 @@ class _ContextWrapperBase(HelperContextMixin):
         return CompiledContextName(self, wrapped_name.string_name)

     @classmethod
-    @evaluator_as_method_param_cache()
-    def create_cached(cls, evaluator, *args, **kwargs):
+    @infer_state_as_method_param_cache()
+    def create_cached(cls, infer_state, *args, **kwargs):
         return cls(*args, **kwargs)

     def __getattr__(self, name):

@@ -268,8 +268,8 @@ class ContextWrapper(_ContextWrapperBase):


 class TreeContext(Context):
-    def __init__(self, evaluator, parent_context, tree_node):
-        super(TreeContext, self).__init__(evaluator, parent_context)
+    def __init__(self, infer_state, parent_context, tree_node):
+        super(TreeContext, self).__init__(infer_state, parent_context)
         self.predefined_names = {}
         self.tree_node = tree_node

@@ -395,7 +395,7 @@ class ContextSet(BaseContextSet):
         )

     def execute(self, arguments):
-        return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set)
+        return ContextSet.from_sets(c.infer_state.execute(c, arguments) for c in self._set)

     def execute_with_values(self, *args, **kwargs):
         return ContextSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set)

@@ -10,7 +10,7 @@ _NO_DEFAULT = object()
 _RECURSION_SENTINEL = object()


-def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False):
+def _memoize_default(default=_NO_DEFAULT, infer_state_is_first_arg=False, second_arg_is_infer_state=False):
     """ This is a typical memoization decorator, BUT there is one difference:
         To prevent recursion it sets defaults.

@@ -21,12 +21,12 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a
     def func(function):
         def wrapper(obj, *args, **kwargs):
             # TODO These checks are kind of ugly and slow.
-            if evaluator_is_first_arg:
+            if infer_state_is_first_arg:
                 cache = obj.memoize_cache
-            elif second_arg_is_evaluator:
+            elif second_arg_is_infer_state:
                 cache = args[0].memoize_cache  # needed for meta classes
             else:
-                cache = obj.evaluator.memoize_cache
+                cache = obj.infer_state.memoize_cache

             try:
                 memo = cache[function]

@@ -47,23 +47,23 @@ def _memoize_default(default=_NO_DEFAULT, evaluator_is_first_arg=False, second_a
     return func


-def evaluator_function_cache(default=_NO_DEFAULT):
+def infer_state_function_cache(default=_NO_DEFAULT):
     def decorator(func):
-        return _memoize_default(default=default, evaluator_is_first_arg=True)(func)
+        return _memoize_default(default=default, infer_state_is_first_arg=True)(func)

     return decorator


-def evaluator_method_cache(default=_NO_DEFAULT):
+def infer_state_method_cache(default=_NO_DEFAULT):
     def decorator(func):
         return _memoize_default(default=default)(func)

     return decorator


-def evaluator_as_method_param_cache():
+def infer_state_as_method_param_cache():
     def decorator(call):
-        return _memoize_default(second_arg_is_evaluator=True)(call)
+        return _memoize_default(second_arg_is_infer_state=True)(call)

     return decorator

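``_memoize_default`` above is the heart of all these renamed decorators, and its one twist deserves spelling out: the cache entry is written before the function runs, so a recursive re-entry finds the default value and terminates instead of looping. A self-contained sketch of that trick:

    _NO_VALUE = object()

    def memoize_default(default=_NO_VALUE):
        # Sketch of the recursion-guarding memoizer: insert the default
        # under the key first, compute afterwards.
        def decorator(function):
            memo = {}
            def wrapper(*args):
                if args in memo:
                    return memo[args]
                memo[args] = default       # re-entrant calls see this
                result = function(*args)
                memo[args] = result
                return result
            return wrapper
        return decorator

    @memoize_default(default=())
    def infer(name):
        # Self-referential definition: the inner call returns the
        # default (), so the outer call settles on ('int',).
        return infer(name) or ('int',)

    print(infer('x'))   # ('int',) - and no RecursionError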
@@ -74,19 +74,19 @@ class CachedMetaClass(type):
     class initializations. Either you do it this way or with decorators, but
     with decorators you lose class access (isinstance, etc).
     """
-    @evaluator_as_method_param_cache()
+    @infer_state_as_method_param_cache()
     def __call__(self, *args, **kwargs):
         return super(CachedMetaClass, self).__call__(*args, **kwargs)


-def evaluator_method_generator_cache():
+def infer_state_method_generator_cache():
     """
     This is a special memoizer. It memoizes generators and also checks for
     recursion errors and returns no further iterator elemends in that case.
     """
     def func(function):
         def wrapper(obj, *args, **kwargs):
-            cache = obj.evaluator.memoize_cache
+            cache = obj.infer_state.memoize_cache
             try:
                 memo = cache[function]
             except KeyError:
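``CachedMetaClass`` above memoizes ``__call__`` through the same machinery (with the infer_state as part of the cache key), so constructing the "same" context twice yields the identical object while ``isinstance`` checks keep working. A self-contained toy of metaclass-level instance caching:

    class CachedMeta(type):
        # Toy instance-caching metaclass in the spirit of CachedMetaClass.
        _instances = {}

        def __call__(cls, *args):
            key = (cls, args)
            if key not in CachedMeta._instances:
                CachedMeta._instances[key] = super(CachedMeta, cls).__call__(*args)
            return CachedMeta._instances[key]

    class ClassContext(object, metaclass=CachedMeta):
        def __init__(self, name):
            self.name = name

    assert ClassContext('int') is ClassContext('int')     # cached instance
    assert isinstance(ClassContext('int'), ClassContext)  # class access kept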
@@ -4,8 +4,8 @@ from jedi.inference.compiled.context import CompiledObject, CompiledName, \
 from jedi.inference.base_context import ContextWrapper, LazyContextWrapper


-def builtin_from_name(evaluator, string):
-    typing_builtins_module = evaluator.builtins_module
+def builtin_from_name(infer_state, string):
+    typing_builtins_module = infer_state.builtins_module
     if string in ('None', 'True', 'False'):
         builtins, = typing_builtins_module.non_stub_context_set
         filter_ = next(builtins.get_filters())

@@ -18,7 +18,7 @@ def builtin_from_name(evaluator, string):

 class CompiledValue(LazyContextWrapper):
     def __init__(self, compiled_obj):
-        self.evaluator = compiled_obj.evaluator
+        self.infer_state = compiled_obj.infer_state
         self._compiled_obj = compiled_obj

     def __getattribute__(self, name):

@@ -29,36 +29,36 @@ class CompiledValue(LazyContextWrapper):

     def _get_wrapped_context(self):
         instance, = builtin_from_name(
-            self.evaluator, self._compiled_obj.name.string_name).execute_with_values()
+            self.infer_state, self._compiled_obj.name.string_name).execute_with_values()
         return instance

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._compiled_obj)


-def create_simple_object(evaluator, obj):
+def create_simple_object(infer_state, obj):
     """
     Only allows creations of objects that are easily picklable across Python
     versions.
     """
     assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj
     compiled_obj = create_from_access_path(
-        evaluator,
-        evaluator.compiled_subprocess.create_simple_object(obj)
+        infer_state,
+        infer_state.compiled_subprocess.create_simple_object(obj)
     )
     return CompiledValue(compiled_obj)


-def get_string_context_set(evaluator):
-    return builtin_from_name(evaluator, u'str').execute_with_values()
+def get_string_context_set(infer_state):
+    return builtin_from_name(infer_state, u'str').execute_with_values()


-def load_module(evaluator, dotted_name, **kwargs):
+def load_module(infer_state, dotted_name, **kwargs):
     # Temporary, some tensorflow builtins cannot be loaded, so it's tried again
     # and again and it's really slow.
     if dotted_name.startswith('tensorflow.'):
         return None
-    access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
+    access_path = infer_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
     if access_path is None:
         return None
-    return create_from_access_path(evaluator, access_path)
+    return create_from_access_path(infer_state, access_path)

@@ -109,8 +109,8 @@ def compiled_objects_cache(attribute_name):
     Caching the id has the advantage that an object doesn't need to be
     hashable.
     """
-    def wrapper(evaluator, obj, parent_context=None):
-        cache = getattr(evaluator, attribute_name)
+    def wrapper(infer_state, obj, parent_context=None):
+        cache = getattr(infer_state, attribute_name)
         # Do a very cheap form of caching here.
         key = id(obj)
         try:

@@ -119,9 +119,9 @@ def compiled_objects_cache(attribute_name):
         except KeyError:
             # TODO wuaaaarrghhhhhhhh
             if attribute_name == 'mixed_cache':
-                result = func(evaluator, obj, parent_context)
+                result = func(infer_state, obj, parent_context)
             else:
-                result = func(evaluator, obj)
+                result = func(infer_state, obj)
             # Need to cache all of them, otherwise the id could be overwritten.
             cache[key] = result, obj, parent_context
             return result
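``compiled_objects_cache`` above keys its cache on ``id(obj)`` so that unhashable objects can be cached too, and the comment "Need to cache all of them, otherwise the id could be overwritten" is about keeping a reference to ``obj`` in the value: a garbage-collected object's id can be recycled for a different object. A self-contained sketch of both points:

    def cached_by_id(func):
        # Sketch of id-keyed caching: works for unhashable objects, and
        # stores obj itself so its id() can't be reused while cached.
        cache = {}
        def wrapper(state, obj):
            key = id(obj)
            try:
                result, _kept_alive = cache[key]
                return result
            except KeyError:
                result = func(state, obj)
                cache[key] = (result, obj)   # obj kept alive on purpose
                return result
        return wrapper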
@@ -130,11 +130,11 @@ def compiled_objects_cache(attribute_name):
     return decorator


-def create_access(evaluator, obj):
-    return evaluator.compiled_subprocess.get_or_create_access_handle(obj)
+def create_access(infer_state, obj):
+    return infer_state.compiled_subprocess.get_or_create_access_handle(obj)


-def load_module(evaluator, dotted_name, sys_path):
+def load_module(infer_state, dotted_name, sys_path):
     temp, sys.path = sys.path, sys_path
     try:
         __import__(dotted_name)

@@ -154,7 +154,7 @@ def load_module(evaluator, dotted_name, sys_path):
     # Just access the cache after import, because of #59 as well as the very
     # complicated import structure of Python.
     module = sys.modules[dotted_name]
-    return create_access_path(evaluator, module)
+    return create_access_path(infer_state, module)


 class AccessPath(object):

@@ -171,8 +171,8 @@ class AccessPath(object):
         self.accesses = value


-def create_access_path(evaluator, obj):
-    access = create_access(evaluator, obj)
+def create_access_path(infer_state, obj):
+    access = create_access(infer_state, obj)
     return AccessPath(access.get_access_path_tuples())

@@ -193,18 +193,18 @@ def get_api_type(obj):


 class DirectObjectAccess(object):
-    def __init__(self, evaluator, obj):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, obj):
+        self._infer_state = infer_state
         self._obj = obj

     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self.get_repr())

     def _create_access(self, obj):
-        return create_access(self._evaluator, obj)
+        return create_access(self._infer_state, obj)

     def _create_access_path(self, obj):
-        return create_access_path(self._evaluator, obj)
+        return create_access_path(self._infer_state, obj)

     def py__bool__(self):
         return bool(self._obj)

@@ -376,7 +376,7 @@ class DirectObjectAccess(object):
         return get_api_type(self._obj)

     def get_access_path_tuples(self):
-        accesses = [create_access(self._evaluator, o) for o in self._get_objects_path()]
+        accesses = [create_access(self._infer_state, o) for o in self._get_objects_path()]
         return [(access.py__name__(), access) for access in accesses]

     def _get_objects_path(self):

@@ -14,7 +14,7 @@ from jedi.inference.names import AbstractNameDefinition, ContextNameMixin, \
 from jedi.inference.base_context import Context, ContextSet, NO_CONTEXTS
 from jedi.inference.lazy_context import LazyKnownContext
 from jedi.inference.compiled.access import _sentinel
-from jedi.inference.cache import evaluator_function_cache
+from jedi.inference.cache import infer_state_function_cache
 from jedi.inference.helpers import reraise_getitem_errors
 from jedi.inference.signature import BuiltinSignature

@@ -41,15 +41,15 @@ class CheckAttribute(object):


 class CompiledObject(Context):
-    def __init__(self, evaluator, access_handle, parent_context=None):
-        super(CompiledObject, self).__init__(evaluator, parent_context)
+    def __init__(self, infer_state, access_handle, parent_context=None):
+        super(CompiledObject, self).__init__(infer_state, parent_context)
         self.access_handle = access_handle

     def py__call__(self, arguments):
         return_annotation = self.access_handle.get_return_annotation()
         if return_annotation is not None:
             # TODO the return annotation may also be a string.
-            return create_from_access_path(self.evaluator, return_annotation).execute_annotation()
+            return create_from_access_path(self.infer_state, return_annotation).execute_annotation()

         try:
             self.access_handle.getattr_paths(u'__call__')

@@ -59,26 +59,26 @@ class CompiledObject(Context):
         if self.access_handle.is_class():
             from jedi.inference.context import CompiledInstance
             return ContextSet([
-                CompiledInstance(self.evaluator, self.parent_context, self, arguments)
+                CompiledInstance(self.infer_state, self.parent_context, self, arguments)
             ])
         else:
             return ContextSet(self._execute_function(arguments))

     @CheckAttribute()
     def py__class__(self):
-        return create_from_access_path(self.evaluator, self.access_handle.py__class__())
+        return create_from_access_path(self.infer_state, self.access_handle.py__class__())

     @CheckAttribute()
     def py__mro__(self):
         return (self,) + tuple(
-            create_from_access_path(self.evaluator, access)
+            create_from_access_path(self.infer_state, access)
             for access in self.access_handle.py__mro__accesses()
         )

     @CheckAttribute()
     def py__bases__(self):
         return tuple(
-            create_from_access_path(self.evaluator, access)
+            create_from_access_path(self.infer_state, access)
             for access in self.access_handle.py__bases__()
         )

@@ -178,7 +178,7 @@ class CompiledObject(Context):
         search_global shouldn't change the fact that there's one dict, this way
         there's only one `object`.
         """
-        return CompiledObjectFilter(self.evaluator, self, is_instance)
+        return CompiledObjectFilter(self.infer_state, self, is_instance)

     @CheckAttribute(u'__getitem__')
     def py__simple_getitem__(self, index):

@@ -187,7 +187,7 @@ class CompiledObject(Context):
         if access is None:
             return NO_CONTEXTS

-        return ContextSet([create_from_access_path(self.evaluator, access)])
+        return ContextSet([create_from_access_path(self.infer_state, access)])

     def py__getitem__(self, index_context_set, contextualized_node):
         all_access_paths = self.access_handle.py__getitem__all_values()

@@ -196,7 +196,7 @@ class CompiledObject(Context):
             # object.
             return super(CompiledObject, self).py__getitem__(index_context_set, contextualized_node)
         return ContextSet(
-            create_from_access_path(self.evaluator, access)
+            create_from_access_path(self.infer_state, access)
             for access in all_access_paths
         )

@@ -215,7 +215,7 @@ class CompiledObject(Context):
             return

         for access in access_path_list:
-            yield LazyKnownContext(create_from_access_path(self.evaluator, access))
+            yield LazyKnownContext(create_from_access_path(self.infer_state, access))

     def py__name__(self):
         return self.access_handle.py__name__()

@@ -237,12 +237,12 @@ class CompiledObject(Context):
             try:
                 # TODO wtf is this? this is exactly the same as the thing
                 # below. It uses getattr as well.
-                self.evaluator.builtins_module.access_handle.getattr_paths(name)
+                self.infer_state.builtins_module.access_handle.getattr_paths(name)
             except AttributeError:
                 continue
             else:
-                bltn_obj = builtin_from_name(self.evaluator, name)
-                for result in self.evaluator.execute(bltn_obj, params):
+                bltn_obj = builtin_from_name(self.infer_state, name)
+                for result in self.infer_state.execute(bltn_obj, params):
                     yield result
         for type_ in docstrings.infer_return_types(self):
             yield type_

@@ -257,20 +257,20 @@ class CompiledObject(Context):

     def execute_operation(self, other, operator):
         return create_from_access_path(
-            self.evaluator,
+            self.infer_state,
             self.access_handle.execute_operation(other.access_handle, operator)
         )

     def negate(self):
-        return create_from_access_path(self.evaluator, self.access_handle.negate())
+        return create_from_access_path(self.infer_state, self.access_handle.negate())

     def get_metaclasses(self):
         return NO_CONTEXTS


 class CompiledName(AbstractNameDefinition):
-    def __init__(self, evaluator, parent_context, name):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, parent_context, name):
+        self._infer_state = infer_state
         self.parent_context = parent_context
         self.string_name = name

@@ -296,7 +296,7 @@ class CompiledName(AbstractNameDefinition):
     @underscore_memoization
     def infer(self):
         return ContextSet([_create_from_name(
-            self._evaluator, self.parent_context, self.string_name
+            self._infer_state, self.parent_context, self.string_name
         )])


@@ -322,12 +322,12 @@ class SignatureParamName(ParamNameInterface, AbstractNameDefinition):

     def infer(self):
         p = self._signature_param
-        evaluator = self.parent_context.evaluator
+        infer_state = self.parent_context.infer_state
         contexts = NO_CONTEXTS
         if p.has_default:
-            contexts = ContextSet([create_from_access_path(evaluator, p.default)])
+            contexts = ContextSet([create_from_access_path(infer_state, p.default)])
         if p.has_annotation:
-            annotation = create_from_access_path(evaluator, p.annotation)
+            annotation = create_from_access_path(infer_state, p.annotation)
             contexts |= annotation.execute_with_values()
         return contexts

@@ -364,8 +364,8 @@ class EmptyCompiledName(AbstractNameDefinition):
     completions, just give Jedi the option to return this object. It infers to
     nothing.
     """
-    def __init__(self, evaluator, name):
-        self.parent_context = evaluator.builtins_module
+    def __init__(self, infer_state, name):
+        self.parent_context = infer_state.builtins_module
         self.string_name = name

     def infer(self):

@@ -375,8 +375,8 @@ class EmptyCompiledName(AbstractNameDefinition):
 class CompiledObjectFilter(AbstractFilter):
     name_class = CompiledName

-    def __init__(self, evaluator, compiled_object, is_instance=False):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, compiled_object, is_instance=False):
+        self._infer_state = infer_state
         self.compiled_object = compiled_object
         self.is_instance = is_instance

@@ -399,7 +399,7 @@ class CompiledObjectFilter(AbstractFilter):
         # Always use unicode objects in Python 2 from here.
         name = force_unicode(name)

-        if (is_descriptor and not self._evaluator.allow_descriptor_getattr) or not has_attribute:
+        if (is_descriptor and not self._infer_state.allow_descriptor_getattr) or not has_attribute:
             return [self._get_cached_name(name, is_empty=True)]

         if self.is_instance and name not in dir_callback():

@@ -409,7 +409,7 @@ class CompiledObjectFilter(AbstractFilter):
     @memoize_method
     def _get_cached_name(self, name, is_empty=False):
         if is_empty:
-            return EmptyCompiledName(self._evaluator, name)
+            return EmptyCompiledName(self._infer_state, name)
         else:
             return self._create_name(name)

@@ -426,12 +426,12 @@ class CompiledObjectFilter(AbstractFilter):

         # ``dir`` doesn't include the type names.
         if not self.is_instance and needs_type_completions:
-            for filter in builtin_from_name(self._evaluator, u'type').get_filters():
+            for filter in builtin_from_name(self._infer_state, u'type').get_filters():
                 names += filter.values()
         return names

     def _create_name(self, name):
-        return self.name_class(self._evaluator, self.compiled_object, name)
+        return self.name_class(self._infer_state, self.compiled_object, name)

     def __repr__(self):
         return "<%s: %s>" % (self.__class__.__name__, self.compiled_object)

@@ -507,7 +507,7 @@ def _parse_function_doc(doc):
     return param_str, ret


-def _create_from_name(evaluator, compiled_object, name):
+def _create_from_name(infer_state, compiled_object, name):
     access_paths = compiled_object.access_handle.getattr_paths(name, default=None)
     parent_context = compiled_object
     if parent_context.is_class():

@@ -516,26 +516,26 @@ def _create_from_name(evaluator, compiled_object, name):
|
||||
context = None
|
||||
for access_path in access_paths:
|
||||
context = create_cached_compiled_object(
|
||||
evaluator, access_path, parent_context=context
|
||||
infer_state, access_path, parent_context=context
|
||||
)
|
||||
return context
|
||||
|
||||
|
||||
def _normalize_create_args(func):
|
||||
"""The cache doesn't care about keyword vs. normal args."""
|
||||
def wrapper(evaluator, obj, parent_context=None):
|
||||
return func(evaluator, obj, parent_context)
|
||||
def wrapper(infer_state, obj, parent_context=None):
|
||||
return func(infer_state, obj, parent_context)
|
||||
return wrapper
|
||||
|
||||
|
||||
def create_from_access_path(evaluator, access_path):
|
||||
def create_from_access_path(infer_state, access_path):
|
||||
parent_context = None
|
||||
for name, access in access_path.accesses:
|
||||
parent_context = create_cached_compiled_object(evaluator, access, parent_context)
|
||||
parent_context = create_cached_compiled_object(infer_state, access, parent_context)
|
||||
return parent_context
|
||||
|
||||
|
||||
@_normalize_create_args
|
||||
@evaluator_function_cache()
|
||||
def create_cached_compiled_object(evaluator, access_handle, parent_context):
|
||||
return CompiledObject(evaluator, access_handle, parent_context)
|
||||
@infer_state_function_cache()
|
||||
def create_cached_compiled_object(infer_state, access_handle, parent_context):
|
||||
return CompiledObject(infer_state, access_handle, parent_context)
|
||||
|
||||
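
Editor's note: the hunk above also renames the memoization decorator, from
``evaluator_function_cache()`` to ``infer_state_function_cache()``. A minimal
sketch of the idea behind such a per-state cache (illustrative only; the
``memoize_cache`` attribute below is an assumption, the real decorator lives
in ``jedi/inference/cache.py``)::

    def infer_state_function_cache():
        def decorator(func):
            def wrapper(infer_state, *args, **kwargs):
                # One cache dict per InferState, keyed by the wrapped
                # function and its arguments, so memoized results never
                # leak between independent inference states.
                cache = infer_state.memoize_cache.setdefault(func, {})
                key = (args, frozenset(kwargs.items()))
                try:
                    return cache[key]
                except KeyError:
                    cache[key] = result = func(infer_state, *args, **kwargs)
                    return result
            return wrapper
        return decorator
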
@@ -15,7 +15,7 @@ from jedi.file_io import FileIO
 from jedi.inference.base_context import ContextSet, ContextWrapper
 from jedi.inference.helpers import SimpleGetItemNotFound
 from jedi.inference.context import ModuleContext
-from jedi.inference.cache import evaluator_function_cache
+from jedi.inference.cache import infer_state_function_cache
 from jedi.inference.compiled.getattr_static import getattr_static
 from jedi.inference.compiled.access import compiled_objects_cache, \
     ALLOWED_GETITEM_TYPES, get_api_type
@@ -48,7 +48,7 @@ class MixedObject(ContextWrapper):
         self.access_handle = compiled_object.access_handle

     def get_filters(self, *args, **kwargs):
-        yield MixedObjectFilter(self.evaluator, self)
+        yield MixedObjectFilter(self.infer_state, self)

     def get_signatures(self):
         # Prefer `inspect.signature` over somehow analyzing Python code. It
@@ -105,9 +105,9 @@ class MixedName(compiled.CompiledName):
         contexts = [None]
         for access in access_paths:
             contexts = ContextSet.from_sets(
-                _create(self._evaluator, access, parent_context=c)
+                _create(self._infer_state, access, parent_context=c)
                 if c is None or isinstance(c, MixedObject)
-                else ContextSet({create_cached_compiled_object(c.evaluator, access, c)})
+                else ContextSet({create_cached_compiled_object(c.infer_state, access, c)})
                 for c in contexts
             )
         return contexts
@@ -121,9 +121,9 @@ class MixedObjectFilter(compiled.CompiledObjectFilter):
     name_class = MixedName


-@evaluator_function_cache()
-def _load_module(evaluator, path):
-    module_node = evaluator.parse(
+@infer_state_function_cache()
+def _load_module(infer_state, path):
+    module_node = infer_state.parse(
         path=path,
         cache=True,
         diff_cache=settings.fast_parser,
@@ -131,7 +131,7 @@ def _load_module(evaluator, path):
     ).get_root_node()
     # python_module = inspect.getmodule(python_object)
     # TODO we should actually make something like this possible.
-    #evaluator.modules[python_module.__name__] = module_node
+    #infer_state.modules[python_module.__name__] = module_node
     return module_node


@@ -155,7 +155,7 @@ def _get_object_to_check(python_object):
     raise TypeError  # Prevents computation of `repr` within inspect.


-def _find_syntax_node_name(evaluator, python_object):
+def _find_syntax_node_name(infer_state, python_object):
     original_object = python_object
     try:
         python_object = _get_object_to_check(python_object)
@@ -168,13 +168,13 @@ def _find_syntax_node_name(evaluator, python_object):
         return None

     file_io = FileIO(path)
-    module_node = _load_module(evaluator, path)
+    module_node = _load_module(infer_state, path)

     if inspect.ismodule(python_object):
         # We don't need to check names for modules, because there's not really
         # a way to write a module in a module in Python (and also __name__ can
         # be something like ``email.utils``).
-        code_lines = get_cached_code_lines(evaluator.grammar, path)
+        code_lines = get_cached_code_lines(infer_state.grammar, path)
         return module_node, module_node, file_io, code_lines

     try:
@@ -214,7 +214,7 @@ def _find_syntax_node_name(evaluator, python_object):
     if line_names:
         names = line_names

-    code_lines = get_cached_code_lines(evaluator.grammar, path)
+    code_lines = get_cached_code_lines(infer_state.grammar, path)
     # It's really hard to actually get the right definition, here as a last
     # resort we just return the last one. This chance might lead to odd
     # completions at some points but will lead to mostly correct type
@@ -230,9 +230,9 @@ def _find_syntax_node_name(evaluator, python_object):


 @compiled_objects_cache('mixed_cache')
-def _create(evaluator, access_handle, parent_context, *args):
+def _create(infer_state, access_handle, parent_context, *args):
     compiled_object = create_cached_compiled_object(
-        evaluator,
+        infer_state,
         access_handle,
         parent_context=parent_context and parent_context.compiled_object
     )
@@ -240,7 +240,7 @@ def _create(evaluator, access_handle, parent_context, *args):
     # TODO accessing this is bad, but it probably doesn't matter that much,
     # because we're working with interpreters only here.
     python_object = access_handle.access._obj
-    result = _find_syntax_node_name(evaluator, python_object)
+    result = _find_syntax_node_name(infer_state, python_object)
     if result is None:
         # TODO Care about generics from stuff like `[1]` and don't return like this.
         if type(python_object) in (dict, list, tuple):
@@ -257,14 +257,14 @@ def _create(evaluator, access_handle, parent_context, *args):
             name = compiled_object.get_root_context().py__name__()
             string_names = tuple(name.split('.'))
             module_context = ModuleContext(
-                evaluator, module_node,
+                infer_state, module_node,
                 file_io=file_io,
                 string_names=string_names,
                 code_lines=code_lines,
                 is_package=hasattr(compiled_object, 'py__path__'),
             )
             if name is not None:
-                evaluator.module_cache.add(string_names, ContextSet([module_context]))
+                infer_state.module_cache.add(string_names, ContextSet([module_context]))
         else:
             if parent_context.tree_node.get_root_node() != module_node:
                 # This happens e.g. when __module__ is wrong, or when using

@@ -70,10 +70,10 @@ def _cleanup_process(process, thread):
         pass


-class _EvaluatorProcess(object):
-    def __init__(self, evaluator):
-        self._evaluator_weakref = weakref.ref(evaluator)
-        self._evaluator_id = id(evaluator)
+class _InferStateProcess(object):
+    def __init__(self, infer_state):
+        self._infer_state_weakref = weakref.ref(infer_state)
+        self._infer_state_id = id(infer_state)
         self._handles = {}

     def get_or_create_access_handle(self, obj):
@@ -81,7 +81,7 @@ class _EvaluatorProcess(object):
         try:
             return self.get_access_handle(id_)
         except KeyError:
-            access = DirectObjectAccess(self._evaluator_weakref(), obj)
+            access = DirectObjectAccess(self._infer_state_weakref(), obj)
             handle = AccessHandle(self, access, id_)
             self.set_access_handle(handle)
             return handle
@@ -93,19 +93,19 @@ class _EvaluatorProcess(object):
         self._handles[handle.id] = handle


-class EvaluatorSameProcess(_EvaluatorProcess):
+class InferStateSameProcess(_InferStateProcess):
     """
     Basically just an easy access to functions.py. It has the same API
-    as EvaluatorSubprocess and does the same thing without using a subprocess.
+    as InferStateSubprocess and does the same thing without using a subprocess.
     This is necessary for the Interpreter process.
     """
     def __getattr__(self, name):
-        return partial(_get_function(name), self._evaluator_weakref())
+        return partial(_get_function(name), self._infer_state_weakref())


-class EvaluatorSubprocess(_EvaluatorProcess):
-    def __init__(self, evaluator, compiled_subprocess):
-        super(EvaluatorSubprocess, self).__init__(evaluator)
+class InferStateSubprocess(_InferStateProcess):
+    def __init__(self, infer_state, compiled_subprocess):
+        super(InferStateSubprocess, self).__init__(infer_state)
         self._used = False
         self._compiled_subprocess = compiled_subprocess

@@ -116,7 +116,7 @@ class EvaluatorSubprocess(_EvaluatorProcess):
             self._used = True

             result = self._compiled_subprocess.run(
-                self._evaluator_weakref(),
+                self._infer_state_weakref(),
                 func,
                 args=args,
                 kwargs=kwargs,
@@ -148,7 +148,7 @@ class EvaluatorSubprocess(_EvaluatorProcess):

     def __del__(self):
         if self._used and not self._compiled_subprocess.is_crashed:
-            self._compiled_subprocess.delete_evaluator(self._evaluator_id)
+            self._compiled_subprocess.delete_infer_state(self._infer_state_id)


 class CompiledSubprocess(object):
@@ -158,7 +158,7 @@ class CompiledSubprocess(object):

     def __init__(self, executable):
         self._executable = executable
-        self._evaluator_deletion_queue = queue.deque()
+        self._infer_state_deletion_queue = queue.deque()
         self._cleanup_callable = lambda: None

     def __repr__(self):
@@ -205,18 +205,18 @@ class CompiledSubprocess(object):
             t)
         return process

-    def run(self, evaluator, function, args=(), kwargs={}):
-        # Delete old evaluators.
+    def run(self, infer_state, function, args=(), kwargs={}):
+        # Delete old infer_states.
         while True:
             try:
-                evaluator_id = self._evaluator_deletion_queue.pop()
+                infer_state_id = self._infer_state_deletion_queue.pop()
             except IndexError:
                 break
             else:
-                self._send(evaluator_id, None)
+                self._send(infer_state_id, None)

         assert callable(function)
-        return self._send(id(evaluator), function, args, kwargs)
+        return self._send(id(infer_state), function, args, kwargs)

     def get_sys_path(self):
         return self._send(None, functions.get_sys_path, (), {})
@@ -225,7 +225,7 @@ class CompiledSubprocess(object):
         self.is_crashed = True
         self._cleanup_callable()

-    def _send(self, evaluator_id, function, args=(), kwargs={}):
+    def _send(self, infer_state_id, function, args=(), kwargs={}):
         if self.is_crashed:
             raise InternalError("The subprocess %s has crashed." % self._executable)

@@ -233,7 +233,7 @@ class CompiledSubprocess(object):
             # Python 2 compatibility
             kwargs = {force_unicode(key): value for key, value in kwargs.items()}

-        data = evaluator_id, function, args, kwargs
+        data = infer_state_id, function, args, kwargs
         try:
             pickle_dump(data, self._get_process().stdin, self._pickle_protocol)
         except (socket.error, IOError) as e:
@@ -272,59 +272,59 @@ class CompiledSubprocess(object):
             raise result
         return result

-    def delete_evaluator(self, evaluator_id):
+    def delete_infer_state(self, infer_state_id):
         """
-        Currently we are not deleting evalutors instantly. They only get
+        Currently we are not deleting infer_states instantly. They only get
         deleted once the subprocess is used again. It would probably be a better
         solution to move all of this into a thread. However, the memory usage
         of a single evaluator shouldn't be that high.
+        of a single infer_state shouldn't be that high.
         """
-        # With an argument - the evaluator gets deleted.
-        self._evaluator_deletion_queue.append(evaluator_id)
+        # With an argument - the infer_state gets deleted.
+        self._infer_state_deletion_queue.append(infer_state_id)


 class Listener(object):
     def __init__(self, pickle_protocol):
-        self._evaluators = {}
+        self._infer_states = {}
         # TODO refactor so we don't need to process anymore just handle
         # controlling.
-        self._process = _EvaluatorProcess(Listener)
+        self._process = _InferStateProcess(Listener)
         self._pickle_protocol = pickle_protocol

-    def _get_evaluator(self, function, evaluator_id):
-        from jedi.inference import Evaluator
+    def _get_infer_state(self, function, infer_state_id):
+        from jedi.inference import InferState

         try:
-            evaluator = self._evaluators[evaluator_id]
+            infer_state = self._infer_states[infer_state_id]
         except KeyError:
             from jedi.api.environment import InterpreterEnvironment
-            evaluator = Evaluator(
+            infer_state = InferState(
                 # The project is not actually needed. Nothing should need to
                 # access it.
                 project=None,
                 environment=InterpreterEnvironment()
             )
-            self._evaluators[evaluator_id] = evaluator
-        return evaluator
+            self._infer_states[infer_state_id] = infer_state
+        return infer_state

-    def _run(self, evaluator_id, function, args, kwargs):
-        if evaluator_id is None:
+    def _run(self, infer_state_id, function, args, kwargs):
+        if infer_state_id is None:
             return function(*args, **kwargs)
         elif function is None:
-            del self._evaluators[evaluator_id]
+            del self._infer_states[infer_state_id]
         else:
-            evaluator = self._get_evaluator(function, evaluator_id)
+            infer_state = self._get_infer_state(function, infer_state_id)

             # Exchange all handles
             args = list(args)
             for i, arg in enumerate(args):
                 if isinstance(arg, AccessHandle):
-                    args[i] = evaluator.compiled_subprocess.get_access_handle(arg.id)
+                    args[i] = infer_state.compiled_subprocess.get_access_handle(arg.id)
             for key, value in kwargs.items():
                 if isinstance(value, AccessHandle):
-                    kwargs[key] = evaluator.compiled_subprocess.get_access_handle(value.id)
+                    kwargs[key] = infer_state.compiled_subprocess.get_access_handle(value.id)

-            return function(evaluator, *args, **kwargs)
+            return function(infer_state, *args, **kwargs)

     def listen(self):
         stdout = sys.stdout
@@ -399,7 +399,7 @@ class AccessHandle(object):

     @memoize_method
     def _cached_results(self, name, *args, **kwargs):
-        #if type(self._subprocess) == EvaluatorSubprocess:
+        #if type(self._subprocess) == InferStateSubprocess:
             #print(name, args, kwargs,
                 #self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
             #)

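
Editor's note: ``_send()`` and ``Listener._run()`` above define the wire
protocol between the parent process and the compiled subprocess: one pickled
``(infer_state_id, function, args, kwargs)`` tuple per request, where
``function is None`` means "drop that state" and ``infer_state_id is None``
means "call without any state". A simplified sketch of one round trip (crash
handling and ``AccessHandle`` exchange omitted)::

    import pickle

    def send_request(process, infer_state_id, function, args=(), kwargs={}):
        # One request: a single pickled tuple written to the child's stdin.
        pickle.dump((infer_state_id, function, args, kwargs),
                    process.stdin, protocol=2)
        process.stdin.flush()
        # One response: a single pickled result read back from stdout; the
        # real code re-raises it when the child reports an exception.
        return pickle.load(process.stdout)
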
@@ -12,20 +12,20 @@ def get_sys_path():
     return list(map(cast_path, sys.path))


-def load_module(evaluator, **kwargs):
-    return access.load_module(evaluator, **kwargs)
+def load_module(infer_state, **kwargs):
+    return access.load_module(infer_state, **kwargs)


-def get_compiled_method_return(evaluator, id, attribute, *args, **kwargs):
-    handle = evaluator.compiled_subprocess.get_access_handle(id)
+def get_compiled_method_return(infer_state, id, attribute, *args, **kwargs):
+    handle = infer_state.compiled_subprocess.get_access_handle(id)
     return getattr(handle.access, attribute)(*args, **kwargs)


-def create_simple_object(evaluator, obj):
-    return access.create_access_path(evaluator, obj)
+def create_simple_object(infer_state, obj):
+    return access.create_access_path(infer_state, obj)


-def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs):
+def get_module_info(infer_state, sys_path=None, full_name=None, **kwargs):
     """
     Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]]
     """
@@ -40,25 +40,25 @@ def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs):
         sys.path = temp


-def list_module_names(evaluator, search_path):
+def list_module_names(infer_state, search_path):
     return [
         force_unicode(name)
         for module_loader, name, is_pkg in iter_modules(search_path)
     ]


-def get_builtin_module_names(evaluator):
+def get_builtin_module_names(infer_state):
     return list(map(force_unicode, sys.builtin_module_names))


-def _test_raise_error(evaluator, exception_type):
+def _test_raise_error(infer_state, exception_type):
     """
     Raise an error to simulate certain problems for unit tests.
     """
     raise exception_type


-def _test_print(evaluator, stderr=None, stdout=None):
+def _test_print(infer_state, stderr=None, stdout=None):
     """
     Force some prints in the subprocesses. This exists for unit tests.
     """
@@ -82,5 +82,5 @@ def _get_init_path(directory_path):
     return None


-def safe_literal_eval(evaluator, value):
+def safe_literal_eval(infer_state, value):
     return parser_utils.safe_literal_eval(value)

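
Editor's note: every helper in this module takes ``infer_state`` as its first
parameter even when it goes unused (``get_builtin_module_names``,
``safe_literal_eval``), because ``Listener._run()`` injects the state
unconditionally before dispatching. An editorial trace of the assumed call
path::

    # Parent process: attribute access on the subprocess proxy returns a
    # callable that prepends the (weakly referenced) state automatically.
    names = infer_state.compiled_subprocess.list_module_names(search_path)

    # Child process: Listener._run() resolves infer_state_id and then calls
    #     functions.list_module_names(infer_state, search_path)
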
@@ -2,7 +2,7 @@ from parso.python import tree

 from jedi._compatibility import use_metaclass
 from jedi import debug
-from jedi.inference.cache import evaluator_method_cache, CachedMetaClass
+from jedi.inference.cache import infer_state_method_cache, CachedMetaClass
 from jedi.inference import compiled
 from jedi.inference import recursion
 from jedi.inference import docstrings
@@ -58,7 +58,7 @@ class FunctionMixin(object):
     def get_filters(self, search_global=False, until_position=None, origin_scope=None):
         if search_global:
             yield ParserTreeFilter(
-                self.evaluator,
+                self.infer_state,
                 context=self,
                 until_position=until_position,
                 origin_scope=origin_scope
@@ -98,7 +98,7 @@ class FunctionMixin(object):
         if arguments is None:
             arguments = AnonymousArguments()

-        return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments)
+        return FunctionExecutionContext(self.infer_state, self.parent_context, self, arguments)

     def get_signatures(self):
         return [TreeSignature(f) for f in self.get_signature_functions()]
@@ -113,14 +113,14 @@ class FunctionContext(use_metaclass(CachedMetaClass, FunctionMixin, FunctionAndC
         def create(tree_node):
             if context.is_class():
                 return MethodContext(
-                    context.evaluator,
+                    context.infer_state,
                     context,
                     parent_context=parent_context,
                     tree_node=tree_node
                 )
             else:
                 return cls(
-                    context.evaluator,
+                    context.infer_state,
                     parent_context=parent_context,
                     tree_node=tree_node
                 )
@@ -141,7 +141,7 @@ class FunctionContext(use_metaclass(CachedMetaClass, FunctionMixin, FunctionAndC
         return function

     def py__class__(self):
-        c, = contexts_from_qualified_names(self.evaluator, u'types', u'FunctionType')
+        c, = contexts_from_qualified_names(self.infer_state, u'types', u'FunctionType')
         return c

     def get_default_param_context(self):
@@ -152,8 +152,8 @@ class FunctionContext(use_metaclass(CachedMetaClass, FunctionMixin, FunctionAndC


 class MethodContext(FunctionContext):
-    def __init__(self, evaluator, class_context, *args, **kwargs):
-        super(MethodContext, self).__init__(evaluator, *args, **kwargs)
+    def __init__(self, infer_state, class_context, *args, **kwargs):
+        super(MethodContext, self).__init__(infer_state, *args, **kwargs)
         self.class_context = class_context

     def get_default_param_context(self):
@@ -171,16 +171,16 @@ class MethodContext(FunctionContext):
 class FunctionExecutionContext(TreeContext):
     function_execution_filter = FunctionExecutionFilter

-    def __init__(self, evaluator, parent_context, function_context, var_args):
+    def __init__(self, infer_state, parent_context, function_context, var_args):
         super(FunctionExecutionContext, self).__init__(
-            evaluator,
+            infer_state,
             parent_context,
             function_context.tree_node,
         )
         self.function_context = function_context
         self.var_args = var_args

-    @evaluator_method_cache(default=NO_CONTEXTS)
+    @infer_state_method_cache(default=NO_CONTEXTS)
     @recursion.execution_recursion_decorator()
     def get_return_values(self, check_yields=False):
         funcdef = self.tree_node
@@ -189,7 +189,7 @@ class FunctionExecutionContext(TreeContext):

         if check_yields:
             context_set = NO_CONTEXTS
-            returns = get_yield_exprs(self.evaluator, funcdef)
+            returns = get_yield_exprs(self.infer_state, funcdef)
         else:
             returns = funcdef.iter_return_stmts()
             from jedi.inference.gradual.annotation import infer_return_types
@@ -214,7 +214,7 @@ class FunctionExecutionContext(TreeContext):
                 try:
                     children = r.children
                 except AttributeError:
-                    ctx = compiled.builtin_from_name(self.evaluator, u'None')
+                    ctx = compiled.builtin_from_name(self.infer_state, u'None')
                     context_set |= ContextSet([ctx])
                 else:
                     context_set |= self.infer_node(children[1])
@@ -226,7 +226,7 @@ class FunctionExecutionContext(TreeContext):
     def _get_yield_lazy_context(self, yield_expr):
         if yield_expr.type == 'keyword':
             # `yield` just yields None.
-            ctx = compiled.builtin_from_name(self.evaluator, u'None')
+            ctx = compiled.builtin_from_name(self.infer_state, u'None')
             yield LazyKnownContext(ctx)
             return

@@ -243,7 +243,7 @@ class FunctionExecutionContext(TreeContext):
         # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
         for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                                 'while_stmt', 'if_stmt'))
-                       for y in get_yield_exprs(self.evaluator, self.tree_node)]
+                       for y in get_yield_exprs(self.infer_state, self.tree_node)]

         # Calculate if the yields are placed within the same for loop.
         yields_order = []
@@ -294,11 +294,11 @@ class FunctionExecutionContext(TreeContext):
         )

     def get_filters(self, search_global=False, until_position=None, origin_scope=None):
-        yield self.function_execution_filter(self.evaluator, self,
+        yield self.function_execution_filter(self.infer_state, self,
                                              until_position=until_position,
                                              origin_scope=origin_scope)

-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def get_executed_params_and_issues(self):
         return self.var_args.get_executed_params_and_issues(self)

@@ -323,16 +323,16 @@ class FunctionExecutionContext(TreeContext):
         """
         Created to be used by inheritance.
         """
-        evaluator = self.evaluator
+        infer_state = self.infer_state
         is_coroutine = self.tree_node.parent.type in ('async_stmt', 'async_funcdef')
-        is_generator = bool(get_yield_exprs(evaluator, self.tree_node))
+        is_generator = bool(get_yield_exprs(infer_state, self.tree_node))
         from jedi.inference.gradual.typing import GenericClass

         if is_coroutine:
             if is_generator:
-                if evaluator.environment.version_info < (3, 6):
+                if infer_state.environment.version_info < (3, 6):
                     return NO_CONTEXTS
-                async_generator_classes = evaluator.typing_module \
+                async_generator_classes = infer_state.typing_module \
                     .py__getattribute__('AsyncGenerator')

                 yield_contexts = self.merge_yield_contexts(is_async=True)
@@ -344,9 +344,9 @@ class FunctionExecutionContext(TreeContext):
                     for c in async_generator_classes
                 ).execute_annotation()
             else:
-                if evaluator.environment.version_info < (3, 5):
+                if infer_state.environment.version_info < (3, 5):
                     return NO_CONTEXTS
-                async_classes = evaluator.typing_module.py__getattribute__('Coroutine')
+                async_classes = infer_state.typing_module.py__getattribute__('Coroutine')
                 return_contexts = self.get_return_values()
                 # Only the first generic is relevant.
                 generics = (return_contexts.py__class__(), NO_CONTEXTS, NO_CONTEXTS)
@@ -355,7 +355,7 @@ class FunctionExecutionContext(TreeContext):
                 ).execute_annotation()
         else:
             if is_generator:
-                return ContextSet([iterable.Generator(evaluator, self)])
+                return ContextSet([iterable.Generator(infer_state, self)])
             else:
                 return self.get_return_values()

@@ -380,7 +380,7 @@ class OverloadedFunctionContext(FunctionMixin, ContextWrapper):
         if matched:
             return context_set

-        if self.evaluator.is_analysis:
+        if self.infer_state.is_analysis:
             # In this case we want precision.
             return NO_CONTEXTS
         return ContextSet.from_sets(fe.infer() for fe in function_executions)
@@ -412,7 +412,7 @@ def _find_overload_functions(context, tree_node):

     while True:
         filter = ParserTreeFilter(
-            context.evaluator,
+            context.infer_state,
             context,
             until_position=tree_node.start_pos
         )

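
Editor's note: the ``infer()`` hunks above amount to a small decision table
for what a function execution evaluates to (the version checks are against
the analyzed environment, not the host interpreter)::

    # coroutine and generator -> typing.AsyncGenerator[...]  (3.6+ only)
    # coroutine only          -> typing.Coroutine[...]       (3.5+ only)
    # generator only          -> iterable.Generator(infer_state, self)
    # plain function          -> self.get_return_values()
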
@@ -10,7 +10,7 @@ from jedi.inference.names import ContextName, TreeNameDefinition
 from jedi.inference.base_context import Context, NO_CONTEXTS, ContextSet, \
     iterator_to_context_set, ContextWrapper
 from jedi.inference.lazy_context import LazyKnownContext, LazyKnownContexts
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.arguments import AnonymousArguments, \
     ValuesArguments, TreeArgumentsWrapper
 from jedi.inference.context.function import \
@@ -50,7 +50,7 @@ class AnonymousInstanceArguments(AnonymousArguments):
             # executions of this function, we have all the params already.
             return [self_param], []
         executed_params = list(search_params(
-            execution_context.evaluator,
+            execution_context.infer_state,
             execution_context,
             execution_context.tree_node
         ))
@@ -61,8 +61,8 @@ class AnonymousInstanceArguments(AnonymousArguments):
 class AbstractInstanceContext(Context):
     api_type = u'instance'

-    def __init__(self, evaluator, parent_context, class_context, var_args):
-        super(AbstractInstanceContext, self).__init__(evaluator, parent_context)
+    def __init__(self, infer_state, parent_context, class_context, var_args):
+        super(AbstractInstanceContext, self).__init__(infer_state, parent_context)
         # Generated instances are classes that are just generated by self
         # (No var_args) used.
         self.class_context = class_context
@@ -117,7 +117,7 @@ class AbstractInstanceContext(Context):
         names = self.get_function_slot_names(u'__get__')
         if names:
             if obj is None:
-                obj = compiled.builtin_from_name(self.evaluator, u'None')
+                obj = compiled.builtin_from_name(self.infer_state, u'None')
             return self.execute_function_slots(names, obj, class_context)
         else:
             return ContextSet([self])
@@ -132,7 +132,7 @@ class AbstractInstanceContext(Context):
                 # In this case we're excluding compiled objects that are
                 # not fake objects. It doesn't make sense for normal
                 # compiled objects to search for self variables.
-                yield SelfAttributeFilter(self.evaluator, self, cls, origin_scope)
+                yield SelfAttributeFilter(self.infer_state, self, cls, origin_scope)

         class_filters = class_context.get_filters(
             search_global=False,
@@ -141,9 +141,9 @@ class AbstractInstanceContext(Context):
         )
         for f in class_filters:
             if isinstance(f, ClassFilter):
-                yield InstanceClassFilter(self.evaluator, self, f)
+                yield InstanceClassFilter(self.infer_state, self, f)
             elif isinstance(f, CompiledObjectFilter):
-                yield CompiledInstanceClassFilter(self.evaluator, self, f)
+                yield CompiledInstanceClassFilter(self.infer_state, self, f)
             else:
                 # Probably from the metaclass.
                 yield f
@@ -168,7 +168,7 @@ class AbstractInstanceContext(Context):
         for generator in self.execute_function_slots(iter_slot_names):
             if generator.is_instance() and not generator.is_compiled():
                 # `__next__` logic.
-                if self.evaluator.environment.version_info.major == 2:
+                if self.infer_state.environment.version_info.major == 2:
                     name = u'next'
                 else:
                     name = u'__next__'
@@ -199,7 +199,7 @@ class AbstractInstanceContext(Context):
             bound_method = BoundMethod(self, function)
             yield bound_method.get_function_execution(self.var_args)

-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def create_instance_context(self, class_context, node):
         if node.parent.type in ('funcdef', 'classdef'):
             node = node.parent
@@ -219,7 +219,7 @@ class AbstractInstanceContext(Context):
             else:
                 return bound_method.get_function_execution()
         elif scope.type == 'classdef':
-            class_context = ClassContext(self.evaluator, parent_context, scope)
+            class_context = ClassContext(self.infer_state, parent_context, scope)
             return class_context
         elif scope.type in ('comp_for', 'sync_comp_for'):
             # Comprehensions currently don't have a special scope in Jedi.
@@ -238,9 +238,9 @@ class AbstractInstanceContext(Context):


 class CompiledInstance(AbstractInstanceContext):
-    def __init__(self, evaluator, parent_context, class_context, var_args):
+    def __init__(self, infer_state, parent_context, class_context, var_args):
         self._original_var_args = var_args
-        super(CompiledInstance, self).__init__(evaluator, parent_context, class_context, var_args)
+        super(CompiledInstance, self).__init__(infer_state, parent_context, class_context, var_args)

     @property
     def name(self):
@@ -258,16 +258,16 @@ class CompiledInstance(AbstractInstanceContext):


 class TreeInstance(AbstractInstanceContext):
-    def __init__(self, evaluator, parent_context, class_context, var_args):
+    def __init__(self, infer_state, parent_context, class_context, var_args):
         # I don't think that dynamic append lookups should happen here. That
         # sounds more like something that should go to py__iter__.
         if class_context.py__name__() in ['list', 'set'] \
-                and parent_context.get_root_context() == evaluator.builtins_module:
+                and parent_context.get_root_context() == infer_state.builtins_module:
             # compare the module path with the builtin name.
             if settings.dynamic_array_additions:
                 var_args = iterable.get_dynamic_array_instance(self, var_args)

-        super(TreeInstance, self).__init__(evaluator, parent_context,
+        super(TreeInstance, self).__init__(infer_state, parent_context,
                                            class_context, var_args)
         self.tree_node = class_context.tree_node

@@ -277,7 +277,7 @@ class TreeInstance(AbstractInstanceContext):

     # This can recurse, if the initialization of the class includes a reference
     # to itself.
-    @evaluator_method_cache(default=None)
+    @infer_state_method_cache(default=None)
     def _get_annotated_class_object(self):
         from jedi.inference.gradual.annotation import py__annotations__, \
             infer_type_vars_for_execution
@@ -313,9 +313,9 @@ class TreeInstance(AbstractInstanceContext):


 class AnonymousInstance(TreeInstance):
-    def __init__(self, evaluator, parent_context, class_context):
+    def __init__(self, infer_state, parent_context, class_context):
         super(AnonymousInstance, self).__init__(
-            evaluator,
+            infer_state,
             parent_context,
             class_context,
             var_args=AnonymousInstanceArguments(self),
@@ -327,9 +327,9 @@ class AnonymousInstance(TreeInstance):

 class CompiledInstanceName(compiled.CompiledName):

-    def __init__(self, evaluator, instance, klass, name):
+    def __init__(self, infer_state, instance, klass, name):
         super(CompiledInstanceName, self).__init__(
-            evaluator,
+            infer_state,
             klass.parent_context,
             name.string_name
         )
@@ -348,8 +348,8 @@ class CompiledInstanceName(compiled.CompiledName):
 class CompiledInstanceClassFilter(AbstractFilter):
     name_class = CompiledInstanceName

-    def __init__(self, evaluator, instance, f):
-        self._evaluator = evaluator
+    def __init__(self, infer_state, instance, f):
+        self._infer_state = infer_state
         self._instance = instance
         self._class_filter = f

@@ -362,7 +362,7 @@ class CompiledInstanceClassFilter(AbstractFilter):
     def _convert(self, names):
         klass = self._class_filter.compiled_object
         return [
-            CompiledInstanceName(self._evaluator, self._instance, klass, n)
+            CompiledInstanceName(self._infer_state, self._instance, klass, n)
             for n in names
         ]

@@ -376,7 +376,7 @@ class BoundMethod(FunctionMixin, ContextWrapper):
         return True

     def py__class__(self):
-        c, = contexts_from_qualified_names(self.evaluator, u'types', u'MethodType')
+        c, = contexts_from_qualified_names(self.infer_state, u'types', u'MethodType')
         return c

     def _get_arguments(self, arguments):
@@ -456,7 +456,7 @@ class InstanceClassFilter(AbstractFilter):
     resulting names in LazyInstanceClassName. The idea is that the class name
     filtering can be very flexible and always be reflected in instances.
"""
|
||||
def __init__(self, evaluator, instance, class_filter):
|
||||
def __init__(self, infer_state, instance, class_filter):
|
||||
self._instance = instance
|
||||
self._class_filter = class_filter
|
||||
|
||||
@@ -479,9 +479,9 @@ class SelfAttributeFilter(ClassFilter):
|
||||
"""
|
||||
name_class = SelfName
|
||||
|
||||
def __init__(self, evaluator, context, class_context, origin_scope):
|
||||
def __init__(self, infer_state, context, class_context, origin_scope):
|
||||
super(SelfAttributeFilter, self).__init__(
|
||||
evaluator=evaluator,
|
||||
infer_state=infer_state,
|
||||
context=context,
|
||||
node_context=class_context,
|
||||
origin_scope=origin_scope,
|
||||
|
||||
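
Editor's note: ``AbstractInstanceContext.get_filters()`` above layers
instance attribute lookup roughly as follows (editorial summary)::

    # 1. SelfAttributeFilter          - names assigned via ``self.x = ...``
    # 2. for each filter of the class:
    #      ClassFilter          -> wrapped in InstanceClassFilter
    #      CompiledObjectFilter -> wrapped in CompiledInstanceClassFilter
    #      anything else (e.g. from a metaclass) is yielded unchanged
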
@@ -34,7 +34,7 @@ from jedi.inference.helpers import get_int_or_none, is_string, \
     predefine_names, infer_call_of_leaf, reraise_getitem_errors, \
     SimpleGetItemNotFound
 from jedi.inference.utils import safe_property, to_list
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.filters import ParserTreeFilter, LazyAttributeOverwrite, \
     publish_method
 from jedi.inference.base_context import ContextSet, Context, NO_CONTEXTS, \
@@ -44,7 +44,7 @@ from jedi.parser_utils import get_sync_comp_fors

 class IterableMixin(object):
     def py__stop_iteration_returns(self):
-        return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')])
+        return ContextSet([compiled.builtin_from_name(self.infer_state, u'None')])

     # At the moment, safe values are simple values like "foo", 1 and not
     # lists/dicts. Therefore as a small speed optimization we can just do the
@@ -66,7 +66,7 @@ class GeneratorBase(LazyAttributeOverwrite, IterableMixin):
     array_type = None

     def _get_wrapped_context(self):
-        generator, = self.evaluator.typing_module \
+        generator, = self.infer_state.typing_module \
             .py__getattribute__('Generator') \
             .execute_annotation()
         return generator
@@ -88,7 +88,7 @@ class GeneratorBase(LazyAttributeOverwrite, IterableMixin):
         return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())

     def py__stop_iteration_returns(self):
-        return ContextSet([compiled.builtin_from_name(self.evaluator, u'None')])
+        return ContextSet([compiled.builtin_from_name(self.infer_state, u'None')])

     @property
     def name(self):
@@ -97,8 +97,8 @@ class GeneratorBase(LazyAttributeOverwrite, IterableMixin):

 class Generator(GeneratorBase):
     """Handling of `yield` functions."""
-    def __init__(self, evaluator, func_execution_context):
-        super(Generator, self).__init__(evaluator)
+    def __init__(self, infer_state, func_execution_context):
+        super(Generator, self).__init__(infer_state)
         self._func_execution_context = func_execution_context

     def py__iter__(self, contextualized_node=None):
@@ -114,13 +114,13 @@ class Generator(GeneratorBase):
 class CompForContext(TreeContext):
     @classmethod
     def from_comp_for(cls, parent_context, comp_for):
-        return cls(parent_context.evaluator, parent_context, comp_for)
+        return cls(parent_context.infer_state, parent_context, comp_for)

     def get_filters(self, search_global=False, until_position=None, origin_scope=None):
-        yield ParserTreeFilter(self.evaluator, self)
+        yield ParserTreeFilter(self.infer_state, self)


-def comprehension_from_atom(evaluator, context, atom):
+def comprehension_from_atom(infer_state, context, atom):
     bracket = atom.children[0]
     test_list_comp = atom.children[1]

@@ -131,7 +131,7 @@ def comprehension_from_atom(evaluator, context, atom):
             sync_comp_for = sync_comp_for.children[1]

         return DictComprehension(
-            evaluator,
+            infer_state,
             context,
             sync_comp_for_node=sync_comp_for,
             key_node=test_list_comp.children[0],
@@ -149,7 +149,7 @@ def comprehension_from_atom(evaluator, context, atom):
         sync_comp_for = sync_comp_for.children[1]

     return cls(
-        evaluator,
+        infer_state,
         defining_context=context,
         sync_comp_for_node=sync_comp_for,
         entry_node=test_list_comp.children[0],
@@ -157,7 +157,7 @@ def comprehension_from_atom(evaluator, context, atom):


 class ComprehensionMixin(object):
-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def _get_comp_for_context(self, parent_context, comp_for):
         return CompForContext.from_comp_for(parent_context, comp_for)

@@ -192,7 +192,7 @@ class ComprehensionMixin(object):
         else:
             yield iterated

-    @evaluator_method_cache(default=[])
+    @infer_state_method_cache(default=[])
     @to_list
     def _iterate(self):
         comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))
@@ -224,7 +224,7 @@ class Sequence(LazyAttributeOverwrite, IterableMixin):

     def _get_wrapped_context(self):
         from jedi.inference.gradual.typing import GenericClass
-        klass = compiled.builtin_from_name(self.evaluator, self.array_type)
+        klass = compiled.builtin_from_name(self.infer_state, self.array_type)
         c, = GenericClass(klass, self._get_generics()).execute_annotation()
         return c

@@ -232,11 +232,11 @@ class Sequence(LazyAttributeOverwrite, IterableMixin):
         return None  # We don't know the length, because of appends.

     def py__class__(self):
-        return compiled.builtin_from_name(self.evaluator, self.array_type)
+        return compiled.builtin_from_name(self.infer_state, self.array_type)

     @safe_property
     def parent(self):
-        return self.evaluator.builtins_module
+        return self.infer_state.builtins_module

     def py__getitem__(self, index_context_set, contextualized_node):
         if self.array_type == 'dict':
@@ -245,9 +245,9 @@ class Sequence(LazyAttributeOverwrite, IterableMixin):


 class _BaseComprehension(ComprehensionMixin):
-    def __init__(self, evaluator, defining_context, sync_comp_for_node, entry_node):
+    def __init__(self, infer_state, defining_context, sync_comp_for_node, entry_node):
         assert sync_comp_for_node.type == 'sync_comp_for'
-        super(_BaseComprehension, self).__init__(evaluator)
+        super(_BaseComprehension, self).__init__(infer_state)
         self._defining_context = defining_context
         self._sync_comp_for_node = sync_comp_for_node
         self._entry_node = entry_node
@@ -277,9 +277,9 @@ class GeneratorComprehension(_BaseComprehension, GeneratorBase):
 class DictComprehension(ComprehensionMixin, Sequence):
     array_type = u'dict'

-    def __init__(self, evaluator, defining_context, sync_comp_for_node, key_node, value_node):
+    def __init__(self, infer_state, defining_context, sync_comp_for_node, key_node, value_node):
         assert sync_comp_for_node.type == 'sync_comp_for'
-        super(DictComprehension, self).__init__(evaluator)
+        super(DictComprehension, self).__init__(infer_state)
         self._defining_context = defining_context
         self._sync_comp_for_node = sync_comp_for_node
         self._entry_node = key_node
@@ -308,14 +308,14 @@ class DictComprehension(ComprehensionMixin, Sequence):
     @publish_method('values')
     def _imitate_values(self):
         lazy_context = LazyKnownContexts(self._dict_values())
-        return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])
+        return ContextSet([FakeSequence(self.infer_state, u'list', [lazy_context])])

     @publish_method('items')
     def _imitate_items(self):
         lazy_contexts = [
             LazyKnownContext(
                 FakeSequence(
-                    self.evaluator,
+                    self.infer_state,
                     u'tuple',
                     [LazyKnownContexts(key),
                      LazyKnownContexts(value)]
@@ -324,7 +324,7 @@ class DictComprehension(ComprehensionMixin, Sequence):
             for key, value in self._iterate()
         ]

-        return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])
+        return ContextSet([FakeSequence(self.infer_state, u'list', lazy_contexts)])

     def get_mapping_item_contexts(self):
         return self._dict_keys(), self._dict_values()
@@ -341,8 +341,8 @@ class SequenceLiteralContext(Sequence):
                '[': u'list',
                '{': u'set'}

-    def __init__(self, evaluator, defining_context, atom):
-        super(SequenceLiteralContext, self).__init__(evaluator)
+    def __init__(self, infer_state, defining_context, atom):
+        super(SequenceLiteralContext, self).__init__(infer_state)
         self.atom = atom
         self._defining_context = defining_context

@@ -355,7 +355,7 @@ class SequenceLiteralContext(Sequence):
     def py__simple_getitem__(self, index):
         """Here the index is an int/str. Raises IndexError/KeyError."""
         if self.array_type == u'dict':
-            compiled_obj_index = compiled.create_simple_object(self.evaluator, index)
+            compiled_obj_index = compiled.create_simple_object(self.infer_state, index)
             for key, value in self.get_tree_entries():
                 for k in self._defining_context.infer_node(key):
                     try:
@@ -471,27 +471,27 @@ class SequenceLiteralContext(Sequence):
 class DictLiteralContext(_DictMixin, SequenceLiteralContext):
     array_type = u'dict'

-    def __init__(self, evaluator, defining_context, atom):
-        super(SequenceLiteralContext, self).__init__(evaluator)
+    def __init__(self, infer_state, defining_context, atom):
+        super(SequenceLiteralContext, self).__init__(infer_state)
         self._defining_context = defining_context
         self.atom = atom

     @publish_method('values')
     def _imitate_values(self):
         lazy_context = LazyKnownContexts(self._dict_values())
-        return ContextSet([FakeSequence(self.evaluator, u'list', [lazy_context])])
+        return ContextSet([FakeSequence(self.infer_state, u'list', [lazy_context])])

     @publish_method('items')
     def _imitate_items(self):
         lazy_contexts = [
             LazyKnownContext(FakeSequence(
-                self.evaluator, u'tuple',
+                self.infer_state, u'tuple',
                 (LazyTreeContext(self._defining_context, key_node),
                  LazyTreeContext(self._defining_context, value_node))
             )) for key_node, value_node in self.get_tree_entries()
         ]

-        return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])
+        return ContextSet([FakeSequence(self.infer_state, u'list', lazy_contexts)])

     def _dict_keys(self):
         return ContextSet.from_sets(
@@ -504,19 +504,19 @@ class DictLiteralContext(_DictMixin, SequenceLiteralContext):


 class _FakeArray(SequenceLiteralContext):
-    def __init__(self, evaluator, container, type):
-        super(SequenceLiteralContext, self).__init__(evaluator)
+    def __init__(self, infer_state, container, type):
+        super(SequenceLiteralContext, self).__init__(infer_state)
         self.array_type = type
         self.atom = container
         # TODO is this class really needed?


 class FakeSequence(_FakeArray):
-    def __init__(self, evaluator, array_type, lazy_context_list):
+    def __init__(self, infer_state, array_type, lazy_context_list):
         """
         type should be one of "tuple", "list"
         """
-        super(FakeSequence, self).__init__(evaluator, None, array_type)
+        super(FakeSequence, self).__init__(infer_state, None, array_type)
         self._lazy_context_list = lazy_context_list

     def py__simple_getitem__(self, index):
@@ -538,16 +538,16 @@ class FakeSequence(_FakeArray):


 class FakeDict(_DictMixin, _FakeArray):
-    def __init__(self, evaluator, dct):
-        super(FakeDict, self).__init__(evaluator, dct, u'dict')
+    def __init__(self, infer_state, dct):
+        super(FakeDict, self).__init__(infer_state, dct, u'dict')
         self._dct = dct

     def py__iter__(self, contextualized_node=None):
         for key in self._dct:
-            yield LazyKnownContext(compiled.create_simple_object(self.evaluator, key))
+            yield LazyKnownContext(compiled.create_simple_object(self.infer_state, key))

     def py__simple_getitem__(self, index):
-        if is_py3 and self.evaluator.environment.version_info.major == 2:
+        if is_py3 and self.infer_state.environment.version_info.major == 2:
             # In Python 2 bytes and unicode compare.
             if isinstance(index, bytes):
                 index_unicode = force_unicode(index)
@@ -569,7 +569,7 @@ class FakeDict(_DictMixin, _FakeArray):
     @publish_method('values')
     def _values(self):
         return ContextSet([FakeSequence(
-            self.evaluator, u'tuple',
+            self.infer_state, u'tuple',
             [LazyKnownContexts(self._dict_values())]
         )])

@@ -587,8 +587,8 @@ class FakeDict(_DictMixin, _FakeArray):


 class MergedArray(_FakeArray):
-    def __init__(self, evaluator, arrays):
-        super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
+    def __init__(self, infer_state, arrays):
+        super(MergedArray, self).__init__(infer_state, arrays, arrays[-1].array_type)
         self._arrays = arrays

     def py__iter__(self, contextualized_node=None):
@@ -657,7 +657,7 @@ def check_array_additions(context, sequence):
     return _check_array_additions(context, sequence)


-@evaluator_method_cache(default=NO_CONTEXTS)
+@infer_state_method_cache(default=NO_CONTEXTS)
 @debug.increase_indent
 def _check_array_additions(context, sequence):
     """
@@ -675,7 +675,7 @@ def _check_array_additions(context, sequence):
         return NO_CONTEXTS

     def find_additions(context, arglist, add_name):
-        params = list(arguments.TreeArguments(context.evaluator, context, arglist).unpack())
+        params = list(arguments.TreeArguments(context.infer_state, context, arglist).unpack())
         result = set()
         if add_name in ['insert']:
             params = params[1:]
@@ -719,7 +719,7 @@ def _check_array_additions(context, sequence):

             random_context = context.create_context(name)

-            with recursion.execution_allowed(context.evaluator, power) as allowed:
+            with recursion.execution_allowed(context.infer_state, power) as allowed:
                 if allowed:
                     found = infer_call_of_leaf(
                         random_context,
@@ -758,7 +758,7 @@ class _ArrayInstance(HelperContextMixin):
         self.var_args = var_args

     def py__class__(self):
-        tuple_, = self.instance.evaluator.builtins_module.py__getattribute__('tuple')
+        tuple_, = self.instance.infer_state.builtins_module.py__getattribute__('tuple')
         return tuple_

     def py__iter__(self, contextualized_node=None):
@@ -792,7 +792,7 @@ class Slice(object):

     def __getattr__(self, name):
         if self._slice_object is None:
-            context = compiled.builtin_from_name(self._context.evaluator, 'slice')
+            context = compiled.builtin_from_name(self._context.infer_state, 'slice')
             self._slice_object, = context.execute_with_values()
         return getattr(self._slice_object, name)

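
Editor's note: the ``FakeSequence``/``FakeDict`` hunks above fake builtin
containers for inference; ``_imitate_items()``, for instance, models
``d.items()`` as a faked ``list`` of faked two-element ``tuple``s (editorial
sketch, the ``entries`` iterable of lazy key/value wrappers is hypothetical)::

    items = FakeSequence(infer_state, u'list', [
        LazyKnownContext(FakeSequence(infer_state, u'tuple',
                                      [lazy_key, lazy_value]))
        for lazy_key, lazy_value in entries
    ])
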
@@ -39,8 +39,8 @@ py__doc__() Returns the docstring for a context.
 from jedi import debug
 from jedi._compatibility import use_metaclass
 from jedi.parser_utils import get_cached_parent_scope
-from jedi.inference.cache import evaluator_method_cache, CachedMetaClass, \
-    evaluator_method_generator_cache
+from jedi.inference.cache import infer_state_method_cache, CachedMetaClass, \
+    infer_state_method_generator_cache
 from jedi.inference import compiled
 from jedi.inference.lazy_context import LazyKnownContexts
 from jedi.inference.filters import ParserTreeFilter
@@ -73,7 +73,7 @@ class ClassName(TreeNameDefinition):
         # We're using a different context to infer, so we cannot call super().
         from jedi.inference.syntax_tree import tree_name_to_contexts
         inferred = tree_name_to_contexts(
-            self.parent_context.evaluator, self._name_context, self.tree_name)
+            self.parent_context.infer_state, self._name_context, self.tree_name)

         for result_context in inferred:
             if self._apply_decorators:
@@ -141,10 +141,10 @@ class ClassMixin(object):
         from jedi.inference.context import TreeInstance
         if arguments is None:
             arguments = ValuesArguments([])
-        return ContextSet([TreeInstance(self.evaluator, self.parent_context, self, arguments)])
+        return ContextSet([TreeInstance(self.infer_state, self.parent_context, self, arguments)])

     def py__class__(self):
-        return compiled.builtin_from_name(self.evaluator, u'type')
+        return compiled.builtin_from_name(self.infer_state, u'type')

     @property
     def name(self):
@@ -159,7 +159,7 @@ class ClassMixin(object):
                 return list(context_.get_param_names())[1:]
         return []

-    @evaluator_method_generator_cache()
+    @infer_state_method_generator_cache()
     def py__mro__(self):
         mro = [self]
         yield self
@@ -208,13 +208,13 @@ class ClassMixin(object):
                     yield filter
                 else:
                     yield ClassFilter(
-                        self.evaluator, self, node_context=cls,
+                        self.infer_state, self, node_context=cls,
                         origin_scope=origin_scope,
                         is_instance=is_instance
                     )
         if not is_instance:
             from jedi.inference.compiled import builtin_from_name
-            type_ = builtin_from_name(self.evaluator, u'type')
+            type_ = builtin_from_name(self.infer_state, u'type')
             assert isinstance(type_, ClassContext)
             if type_ != self:
                 for instance in type_.py__call__():
@@ -230,7 +230,7 @@ class ClassMixin(object):

     def get_global_filter(self, until_position=None, origin_scope=None):
         return ParserTreeFilter(
-            self.evaluator,
+            self.infer_state,
             context=self,
             until_position=until_position,
             origin_scope=origin_scope
@@ -240,7 +240,7 @@ class ClassMixin(object):
 class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBase)):
     api_type = u'class'

-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def list_type_vars(self):
         found = []
         arglist = self.tree_node.get_super_arglist()
@@ -262,10 +262,10 @@ class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBa
         arglist = self.tree_node.get_super_arglist()
         if arglist:
             from jedi.inference import arguments
-            return arguments.TreeArguments(self.evaluator, self.parent_context, arglist)
+            return arguments.TreeArguments(self.infer_state, self.parent_context, arglist)
         return None

-    @evaluator_method_cache(default=())
+    @infer_state_method_cache(default=())
     def py__bases__(self):
         args = self._get_bases_arguments()
         if args is not None:
@@ -274,10 +274,10 @@ class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBa
                 return lst

         if self.py__name__() == 'object' \
-                and self.parent_context == self.evaluator.builtins_module:
+                and self.parent_context == self.infer_state.builtins_module:
             return []
         return [LazyKnownContexts(
-            self.evaluator.builtins_module.py__getattribute__('object')
+            self.infer_state.builtins_module.py__getattribute__('object')
         )]

     def py__getitem__(self, index_context_set, contextualized_node):
@@ -321,7 +321,7 @@ class ClassContext(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBa
         debug.dbg('Unprocessed metaclass %s', metaclass)
         return []

-    @evaluator_method_cache(default=NO_CONTEXTS)
+    @infer_state_method_cache(default=NO_CONTEXTS)
     def get_metaclasses(self):
         args = self._get_bases_arguments()
         if args is not None:

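All the ``evaluator_method_cache`` decorators in this file become ``infer_state_method_cache``; the memoized results live on the inference state object. As a rough mental model, a sketch with assumed semantics, not jedi's implementation (the ``memoize_cache`` attribute and the recursion guard are assumptions)::

    from functools import wraps

    _COMPUTING = object()

    def infer_state_method_cache(default=None):
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args):
                cache = self.infer_state.memoize_cache.setdefault(func, {})
                key = (self, args)
                if key in cache:
                    value = cache[key]
                    # A re-entrant call gets `default` instead of looping.
                    return default if value is _COMPUTING else value
                cache[key] = _COMPUTING
                cache[key] = result = func(self, *args)
                return result
            return wrapper
        return decorator
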
@@ -2,7 +2,7 @@ import re
 import os

 from jedi import debug
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.names import ContextNameMixin, AbstractNameDefinition
 from jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
 from jedi.inference import compiled
@@ -27,13 +27,13 @@ class _ModuleAttributeName(AbstractNameDefinition):
     def infer(self):
         if self._string_value is not None:
             s = self._string_value
-            if self.parent_context.evaluator.environment.version_info.major == 2 \
+            if self.parent_context.infer_state.environment.version_info.major == 2 \
                     and not isinstance(s, bytes):
                 s = s.encode('utf-8')
             return ContextSet([
-                create_simple_object(self.parent_context.evaluator, s)
+                create_simple_object(self.parent_context.infer_state, s)
             ])
-        return compiled.get_string_context_set(self.parent_context.evaluator)
+        return compiled.get_string_context_set(self.parent_context.infer_state)


 class ModuleName(ContextNameMixin, AbstractNameDefinition):
@@ -48,9 +48,9 @@ class ModuleName(ContextNameMixin, AbstractNameDefinition):
         return self._name


-def iter_module_names(evaluator, paths):
+def iter_module_names(infer_state, paths):
     # Python modules/packages
-    for n in evaluator.compiled_subprocess.list_module_names(paths):
+    for n in infer_state.compiled_subprocess.list_module_names(paths):
         yield n

     for path in paths:
@@ -75,7 +75,7 @@ def iter_module_names(evaluator, paths):


 class SubModuleDictMixin(object):
-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def sub_modules_dict(self):
         """
         Lists modules in the directory of this module (if this module is a
@@ -87,7 +87,7 @@ class SubModuleDictMixin(object):
         except AttributeError:
             pass
         else:
-            mods = iter_module_names(self.evaluator, method())
+            mods = iter_module_names(self.infer_state, method())
             for name in mods:
                 # It's obviously a relative import to the current module.
                 names[name] = SubModuleName(self, name)
@@ -101,7 +101,7 @@ class ModuleMixin(SubModuleDictMixin):
     def get_filters(self, search_global=False, until_position=None, origin_scope=None):
         yield MergedFilter(
             ParserTreeFilter(
-                self.evaluator,
+                self.infer_state,
                 context=self,
                 until_position=until_position,
                 origin_scope=origin_scope
@@ -114,7 +114,7 @@ class ModuleMixin(SubModuleDictMixin):
             yield star_filter

     def py__class__(self):
-        c, = contexts_from_qualified_names(self.evaluator, u'types', u'ModuleType')
+        c, = contexts_from_qualified_names(self.infer_state, u'types', u'ModuleType')
         return c

     def is_module(self):
@@ -124,7 +124,7 @@ class ModuleMixin(SubModuleDictMixin):
         return False

     @property
-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def name(self):
         return ModuleName(self, self._string_name)

@@ -141,7 +141,7 @@ class ModuleMixin(SubModuleDictMixin):
             # Remove PEP 3149 names
             return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))

-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def _module_attributes_dict(self):
         names = ['__package__', '__doc__', '__name__']
         # All the additional module attributes are strings.
@@ -157,8 +157,8 @@ class ModuleMixin(SubModuleDictMixin):

     # I'm not sure if the star import cache is really that effective anymore
     # with all the other really fast import caches. Recheck. Also we would need
-    # to push the star imports into Evaluator.module_cache, if we reenable this.
-    @evaluator_method_cache([])
+    # to push the star imports into InferState.module_cache, if we reenable this.
+    @infer_state_method_cache([])
     def star_imports(self):
         from jedi.inference.imports import Importer

@@ -166,7 +166,7 @@ class ModuleMixin(SubModuleDictMixin):
         for i in self.tree_node.iter_imports():
             if i.is_star_import():
                 new = Importer(
-                    self.evaluator,
+                    self.infer_state,
                     import_path=i.get_paths()[-1],
                     module_context=self,
                     level=i.level
@@ -191,9 +191,9 @@ class ModuleContext(ModuleMixin, TreeContext):
     api_type = u'module'
     parent_context = None

-    def __init__(self, evaluator, module_node, file_io, string_names, code_lines, is_package=False):
+    def __init__(self, infer_state, module_node, file_io, string_names, code_lines, is_package=False):
         super(ModuleContext, self).__init__(
-            evaluator,
+            infer_state,
             parent_context=None,
             tree_node=module_node
         )
@@ -243,7 +243,7 @@ class ModuleContext(ModuleMixin, TreeContext):
                 # It is a namespace, now try to find the rest of the
                 # modules on sys_path or whatever the search_path is.
                 paths = set()
-                for s in self.evaluator.get_sys_path():
+                for s in self.infer_state.get_sys_path():
                     other = os.path.join(s, self.name.string_name)
                     if os.path.isdir(other):
                         paths.add(other)

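``iter_module_names`` above delegates the actual listing to the compiled subprocess. For comparison, a plain-Python approximation of such a listing (not jedi's code; stdlib only, and ``ModuleInfo.name`` requires Python 3.6+)::

    import pkgutil

    def list_module_names(paths):
        """Yield importable top-level names found on the given paths."""
        for module_info in pkgutil.iter_modules(paths):
            yield module_info.name
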
@@ -1,4 +1,4 @@
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.filters import DictFilter
 from jedi.inference.names import ContextNameMixin, AbstractNameDefinition
 from jedi.inference.base_context import Context
@@ -25,9 +25,9 @@ class ImplicitNamespaceContext(Context, SubModuleDictMixin):
     api_type = u'module'
     parent_context = None

-    def __init__(self, evaluator, fullname, paths):
-        super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None)
-        self.evaluator = evaluator
+    def __init__(self, infer_state, fullname, paths):
+        super(ImplicitNamespaceContext, self).__init__(infer_state, parent_context=None)
+        self.infer_state = infer_state
         self._fullname = fullname
         self._paths = paths

@@ -35,7 +35,7 @@ class ImplicitNamespaceContext(Context, SubModuleDictMixin):
         yield DictFilter(self.sub_modules_dict())

     @property
-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def name(self):
         string_name = self.py__package__()[-1]
         return ImplicitNSName(self, string_name)

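``ImplicitNamespaceContext`` models PEP 420 implicit namespace packages: directories without an ``__init__.py`` whose parts may even be spread over several ``sys.path`` entries. For example, with a hypothetical layout ``site-a/company/plugin_a.py`` and ``site-b/company/plugin_b.py``::

    import sys
    sys.path += ['site-a', 'site-b']

    import company                    # works without any __init__.py
    print(company.__path__)           # namespace path covering both dirs
    from company import plugin_a, plugin_b
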
@@ -24,7 +24,7 @@ from parso import parse, ParserSyntaxError
 from jedi._compatibility import u
 from jedi import debug
 from jedi.inference.utils import indent_block
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.base_context import iterator_to_context_set, ContextSet, \
     NO_CONTEXTS
 from jedi.inference.lazy_context import LazyKnownContexts
@@ -205,7 +205,7 @@ def _infer_for_statement_string(module_context, string):
     # will be impossible to use `...` (Ellipsis) as a token. Docstring types
     # don't need to conform with the current grammar.
     debug.dbg('Parse docstring code %s', string, color='BLUE')
-    grammar = module_context.evaluator.latest_grammar
+    grammar = module_context.infer_state.latest_grammar
     try:
         module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
     except ParserSyntaxError:
@@ -223,7 +223,7 @@ def _infer_for_statement_string(module_context, string):

     from jedi.inference.context import FunctionContext
     function_context = FunctionContext(
-        module_context.evaluator,
+        module_context.infer_state,
         module_context,
         funcdef
     )
@@ -243,12 +243,12 @@ def _execute_types_in_stmt(module_context, stmt):
     """
     definitions = module_context.infer_node(stmt)
     return ContextSet.from_sets(
-        _execute_array_values(module_context.evaluator, d)
+        _execute_array_values(module_context.infer_state, d)
         for d in definitions
     )


-def _execute_array_values(evaluator, array):
+def _execute_array_values(infer_state, array):
     """
     Tuples indicate that there's not just one return value, but the listed
     ones. `(str, int)` means that it returns a tuple with both types.
@@ -258,16 +258,16 @@ def _execute_array_values(evaluator, array):
         values = []
         for lazy_context in array.py__iter__():
             objects = ContextSet.from_sets(
-                _execute_array_values(evaluator, typ)
+                _execute_array_values(infer_state, typ)
                 for typ in lazy_context.infer()
             )
             values.append(LazyKnownContexts(objects))
-        return {FakeSequence(evaluator, array.array_type, values)}
+        return {FakeSequence(infer_state, array.array_type, values)}
     else:
         return array.execute_annotation()


-@evaluator_method_cache()
+@infer_state_method_cache()
 def infer_param(execution_context, param):
     from jedi.inference.context.instance import InstanceArguments
     from jedi.inference.context import FunctionExecutionContext
@@ -294,7 +294,7 @@ def infer_param(execution_context, param):
     return types


-@evaluator_method_cache()
+@infer_state_method_cache()
 @iterator_to_context_set
 def infer_return_types(function_context):
     def search_return_in_docstr(code):
@@ -307,5 +307,5 @@ def infer_return_types(function_context):
             yield type_

     for type_str in search_return_in_docstr(function_context.py__doc__()):
-        for type_eval in _infer_for_statement_string(function_context.get_root_context(), type_str):
-            yield type_eval
+        for context in _infer_for_statement_string(function_context.get_root_context(), type_str):
+            yield context

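The docstrings module whose diff ends here turns type descriptions in docstrings into inferred values by parsing the type expression with ``latest_grammar`` and executing it in a throwaway ``FunctionContext``. An example of the kind of input it handles (Sphinx-style fields; plain example code, not part of jedi)::

    def to_upper(word):
        """
        :type word: str
        :rtype: str
        """
        return word.upper()   # `word` completes like a str
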
@@ -19,7 +19,7 @@ It works as follows:

 from jedi import settings
 from jedi import debug
-from jedi.inference.cache import evaluator_function_cache
+from jedi.inference.cache import infer_state_function_cache
 from jedi.inference import imports
 from jedi.inference.arguments import TreeArguments
 from jedi.inference.param import create_default_params
@@ -39,12 +39,12 @@ class DynamicExecutedParams(object):
     Simulates being a parameter while actually just being multiple params.
     """

-    def __init__(self, evaluator, executed_params):
-        self.evaluator = evaluator
+    def __init__(self, infer_state, executed_params):
+        self.infer_state = infer_state
         self._executed_params = executed_params

     def infer(self):
-        with recursion.execution_allowed(self.evaluator, self) as allowed:
+        with recursion.execution_allowed(self.infer_state, self) as allowed:
             # We need to catch recursions that may occur, because an
             # anonymous functions can create an anonymous parameter that is
             # more or less self referencing.
@@ -54,7 +54,7 @@ class DynamicExecutedParams(object):


 @debug.increase_indent
-def search_params(evaluator, execution_context, funcdef):
+def search_params(infer_state, execution_context, funcdef):
     """
     A dynamic search for param values. If you try to complete a type:

@@ -70,7 +70,7 @@ def search_params(evaluator, execution_context, funcdef):
     if not settings.dynamic_params:
         return create_default_params(execution_context, funcdef)

-    evaluator.dynamic_params_depth += 1
+    infer_state.dynamic_params_depth += 1
     try:
         path = execution_context.get_root_context().py__file__()
         if path is not None and is_stdlib_path(path):
@@ -91,7 +91,7 @@ def search_params(evaluator, execution_context, funcdef):
             try:
                 module_context = execution_context.get_root_context()
                 function_executions = _search_function_executions(
-                    evaluator,
+                    infer_state,
                     module_context,
                     funcdef,
                     string_name=string_name,
@@ -101,7 +101,7 @@ def search_params(evaluator, execution_context, funcdef):
                         function_execution.get_executed_params_and_issues()[0]
                         for function_execution in function_executions
                     ))
-                    params = [DynamicExecutedParams(evaluator, executed_params)
+                    params = [DynamicExecutedParams(infer_state, executed_params)
                               for executed_params in zipped_params]
                     # Inferes the ExecutedParams to types.
                 else:
@@ -110,12 +110,12 @@ def search_params(evaluator, execution_context, funcdef):
         debug.dbg('Dynamic param result finished', color='MAGENTA')
         return params
     finally:
-        evaluator.dynamic_params_depth -= 1
+        infer_state.dynamic_params_depth -= 1


-@evaluator_function_cache(default=None)
+@infer_state_function_cache(default=None)
 @to_list
-def _search_function_executions(evaluator, module_context, funcdef, string_name):
+def _search_function_executions(infer_state, module_context, funcdef, string_name):
     """
     Returns a list of param names.
     """
@@ -129,7 +129,7 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name)
     found_executions = False
     i = 0
     for for_mod_context in imports.get_modules_containing_name(
-            evaluator, [module_context], string_name):
+            infer_state, [module_context], string_name):
         if not isinstance(module_context, ModuleContext):
             return
         for name, trailer in _get_possible_nodes(for_mod_context, string_name):
@@ -138,12 +138,12 @@ def _search_function_executions(evaluator, module_context, funcdef, string_name)
             # This is a simple way to stop Jedi's dynamic param recursion
             # from going wild: The deeper Jedi's in the recursion, the less
             # code should be inferred.
-            if i * evaluator.dynamic_params_depth > MAX_PARAM_SEARCHES:
+            if i * infer_state.dynamic_params_depth > MAX_PARAM_SEARCHES:
                 return

-            random_context = evaluator.create_context(for_mod_context, name)
+            random_context = infer_state.create_context(for_mod_context, name)
             for function_execution in _check_name_for_execution(
-                    evaluator, random_context, compare_node, name, trailer):
+                    infer_state, random_context, compare_node, name, trailer):
                 found_executions = True
                 yield function_execution

@@ -178,17 +178,17 @@ def _get_possible_nodes(module_context, func_string_name):
             yield name, trailer


-def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
+def _check_name_for_execution(infer_state, context, compare_node, name, trailer):
     from jedi.inference.context.function import FunctionExecutionContext

     def create_func_excs():
         arglist = trailer.children[1]
         if arglist == ')':
             arglist = None
-        args = TreeArguments(evaluator, context, arglist, trailer)
+        args = TreeArguments(infer_state, context, arglist, trailer)
         if value_node.type == 'classdef':
             created_instance = instance.TreeInstance(
-                evaluator,
+                infer_state,
                 value.parent_context,
                 value,
                 args
@@ -198,7 +198,7 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
         else:
             yield value.get_function_execution(args)

-    for value in evaluator.goto_definitions(context, name):
+    for value in infer_state.goto_definitions(context, name):
         value_node = value.tree_node
         if compare_node == value_node:
             for func_execution in create_func_excs():
@@ -219,9 +219,9 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
             execution_context = next(create_func_excs())
             for name, trailer in _get_possible_nodes(module_context, params[0].string_name):
                 if value_node.start_pos < name.start_pos < value_node.end_pos:
-                    random_context = evaluator.create_context(execution_context, name)
+                    random_context = infer_state.create_context(execution_context, name)
                     iterator = _check_name_for_execution(
-                        evaluator,
+                        infer_state,
                         random_context,
                         compare_node,
                         name,

|
||||
|
||||
|
||||
class ParserTreeFilter(AbstractUsedNamesFilter):
|
||||
# TODO remove evaluator as an argument, it's not used.
|
||||
def __init__(self, evaluator, context, node_context=None, until_position=None,
|
||||
# TODO remove infer_state as an argument, it's not used.
|
||||
def __init__(self, infer_state, context, node_context=None, until_position=None,
|
||||
origin_scope=None):
|
||||
"""
|
||||
node_context is an option to specify a second context for use cases
|
||||
@@ -144,10 +144,10 @@ class ParserTreeFilter(AbstractUsedNamesFilter):
|
||||
class FunctionExecutionFilter(ParserTreeFilter):
|
||||
param_name = ParamName
|
||||
|
||||
def __init__(self, evaluator, context, node_context=None,
|
||||
def __init__(self, infer_state, context, node_context=None,
|
||||
until_position=None, origin_scope=None):
|
||||
super(FunctionExecutionFilter, self).__init__(
|
||||
evaluator,
|
||||
infer_state,
|
||||
context,
|
||||
node_context,
|
||||
until_position,
|
||||
@@ -237,7 +237,7 @@ class _BuiltinMappedMethod(Context):
|
||||
|
||||
def __init__(self, builtin_context, method, builtin_func):
|
||||
super(_BuiltinMappedMethod, self).__init__(
|
||||
builtin_context.evaluator,
|
||||
builtin_context.infer_state,
|
||||
parent_context=builtin_context
|
||||
)
|
||||
self._method = method
|
||||
@@ -262,7 +262,7 @@ class SpecialMethodFilter(DictFilter):
|
||||
def __init__(self, parent_context, string_name, value, builtin_context):
|
||||
callable_, python_version = value
|
||||
if python_version is not None and \
|
||||
python_version != parent_context.evaluator.environment.version_info.major:
|
||||
python_version != parent_context.infer_state.environment.version_info.major:
|
||||
raise KeyError
|
||||
|
||||
self.parent_context = parent_context
|
||||
@@ -329,8 +329,8 @@ class _AttributeOverwriteMixin(object):
|
||||
|
||||
class LazyAttributeOverwrite(use_metaclass(_OverwriteMeta, _AttributeOverwriteMixin,
|
||||
LazyContextWrapper)):
|
||||
def __init__(self, evaluator):
|
||||
self.evaluator = evaluator
|
||||
def __init__(self, infer_state):
|
||||
self.infer_state = infer_state
|
||||
|
||||
|
||||
class AttributeOverwrite(use_metaclass(_OverwriteMeta, _AttributeOverwriteMixin,
|
||||
@@ -346,7 +346,7 @@ def publish_method(method_name, python_version_match=None):
|
||||
return decorator
|
||||
|
||||
|
||||
def get_global_filters(evaluator, context, until_position, origin_scope):
|
||||
def get_global_filters(infer_state, context, until_position, origin_scope):
|
||||
"""
|
||||
Returns all filters in order of priority for name resolution.
|
||||
|
||||
@@ -365,7 +365,7 @@ def get_global_filters(evaluator, context, until_position, origin_scope):
|
||||
>>> scope
|
||||
<Function: func@3-5>
|
||||
>>> context = script._get_module().create_context(scope)
|
||||
>>> filters = list(get_global_filters(context.evaluator, context, (4, 0), None))
|
||||
>>> filters = list(get_global_filters(context.infer_state, context, (4, 0), None))
|
||||
|
||||
First we get the names from the function scope.
|
||||
|
||||
@@ -409,4 +409,4 @@ def get_global_filters(evaluator, context, until_position, origin_scope):
|
||||
context = context.parent_context
|
||||
|
||||
# Add builtins to the global scope.
|
||||
yield next(evaluator.builtins_module.get_filters())
|
||||
yield next(infer_state.builtins_module.get_filters())
|
||||
|
||||
@@ -33,9 +33,9 @@ from jedi.inference.gradual.conversion import convert_contexts
|
||||
|
||||
|
||||
class NameFinder(object):
|
||||
def __init__(self, evaluator, context, name_context, name_or_str,
|
||||
def __init__(self, infer_state, context, name_context, name_or_str,
|
||||
position=None, analysis_errors=True):
|
||||
self._evaluator = evaluator
|
||||
self._infer_state = infer_state
|
||||
# Make sure that it's not just a syntax tree node.
|
||||
self._context = context
|
||||
self._name_context = name_context
|
||||
@@ -114,7 +114,7 @@ class NameFinder(object):
|
||||
if lambdef is None or position < lambdef.children[-2].start_pos:
|
||||
position = ancestor.start_pos
|
||||
|
||||
return get_global_filters(self._evaluator, self._context, position, origin_scope)
|
||||
return get_global_filters(self._infer_state, self._context, position, origin_scope)
|
||||
else:
|
||||
return self._get_context_filters(origin_scope)
|
||||
|
||||
@@ -173,7 +173,7 @@ class NameFinder(object):
|
||||
def _check_getattr(self, inst):
|
||||
"""Checks for both __getattr__ and __getattribute__ methods"""
|
||||
# str is important, because it shouldn't be `Name`!
|
||||
name = compiled.create_simple_object(self._evaluator, self._string_name)
|
||||
name = compiled.create_simple_object(self._infer_state, self._string_name)
|
||||
|
||||
# This is a little bit special. `__getattribute__` is in Python
|
||||
# executed before `__getattr__`. But: I know no use case, where
|
||||
@@ -265,7 +265,7 @@ def _check_isinstance_type(context, element, search_name):
|
||||
|
||||
# arglist stuff
|
||||
arglist = trailer.children[1]
|
||||
args = TreeArguments(context.evaluator, context, arglist, trailer)
|
||||
args = TreeArguments(context.infer_state, context, arglist, trailer)
|
||||
param_list = list(args.unpack())
|
||||
# Disallow keyword arguments
|
||||
assert len(param_list) == 2
|
||||
@@ -275,7 +275,7 @@ def _check_isinstance_type(context, element, search_name):
|
||||
is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
|
||||
# Do a simple get_code comparison. They should just have the same code,
|
||||
# and everything will be all right.
|
||||
normalize = context.evaluator.grammar._normalize
|
||||
normalize = context.infer_state.grammar._normalize
|
||||
assert normalize(is_instance_call) == normalize(call)
|
||||
except AssertionError:
|
||||
return None
|
||||
|
||||
@@ -106,7 +106,7 @@ def _break_check(context, context_scope, flow_scope, node):
|
||||
|
||||
|
||||
def _check_if(context, node):
|
||||
with execution_allowed(context.evaluator, node) as allowed:
|
||||
with execution_allowed(context.infer_state, node) as allowed:
|
||||
if not allowed:
|
||||
return UNSURE
|
||||
|
||||
|
||||
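``_check_if`` feeds jedi's flow analysis, which works with a tri-state answer: a branch is reachable, unreachable, or unsure when the condition cannot be evaluated statically. A toy version restricted to literal conditions (illustrative only, not jedi's algorithm)::

    REACHABLE, UNREACHABLE, UNSURE = 'reachable', 'unreachable', 'unsure'

    def check_if(condition):
        if condition is True:
            return REACHABLE
        if condition is False:
            return UNREACHABLE
        return UNSURE
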
@@ -10,7 +10,7 @@ import re
 from parso import ParserSyntaxError, parse

 from jedi._compatibility import force_unicode
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.base_context import ContextSet, NO_CONTEXTS
 from jedi.inference.gradual.typing import TypeVar, LazyGenericClass, \
     AbstractAnnotatedClass
@@ -32,13 +32,13 @@ def infer_annotation(context, annotation):
     """
     context_set = context.infer_node(annotation)
     if len(context_set) != 1:
-        debug.warning("Eval'ed typing index %s should lead to 1 object, "
+        debug.warning("Inferred typing index %s should lead to 1 object, "
                       " not %s" % (annotation, context_set))
         return context_set

-    evaled_context = list(context_set)[0]
-    if is_string(evaled_context):
-        result = _get_forward_reference_node(context, evaled_context.get_safe_value())
+    inferred_context = list(context_set)[0]
+    if is_string(inferred_context):
+        result = _get_forward_reference_node(context, inferred_context.get_safe_value())
         if result is not None:
             return context.infer_node(result)
     return context_set
@@ -60,7 +60,7 @@ def _infer_annotation_string(context, string, index=None):

 def _get_forward_reference_node(context, string):
     try:
-        new_node = context.evaluator.grammar.parse(
+        new_node = context.infer_state.grammar.parse(
             force_unicode(string),
             start_symbol='eval_input',
             error_recovery=False
@@ -106,21 +106,21 @@ def _split_comment_param_declaration(decl_text):
     return params


-@evaluator_method_cache()
+@infer_state_method_cache()
 def infer_param(execution_context, param):
     contexts = _infer_param(execution_context, param)
-    evaluator = execution_context.evaluator
+    infer_state = execution_context.infer_state
     if param.star_count == 1:
-        tuple_ = builtin_from_name(evaluator, 'tuple')
+        tuple_ = builtin_from_name(infer_state, 'tuple')
         return ContextSet([GenericClass(
             tuple_,
             generics=(contexts,),
         ) for c in contexts])
     elif param.star_count == 2:
-        dct = builtin_from_name(evaluator, 'dict')
+        dct = builtin_from_name(infer_state, 'dict')
         return ContextSet([GenericClass(
             dct,
-            generics=(ContextSet([builtin_from_name(evaluator, 'str')]), contexts),
+            generics=(ContextSet([builtin_from_name(infer_state, 'str')]), contexts),
         ) for c in contexts])
         pass
     return contexts
@@ -190,7 +190,7 @@ def py__annotations__(funcdef):
     return dct


-@evaluator_method_cache()
+@infer_state_method_cache()
 def infer_return_types(function_execution_context):
     """
     Infers the type of a function's return value,

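The ``infer_param`` hunk above wraps star parameters the way PEP 484 prescribes: ``*args: T`` behaves as a tuple of ``T`` and ``**kwargs: T`` as a dict from ``str`` to ``T``. In example terms::

    def connect(*hosts: str, **options: int) -> None:
        # `hosts` is treated like Tuple[str, ...],
        # `options` like Dict[str, int].
        ...
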
@@ -87,11 +87,11 @@ def _load_stub_module(module):
         return module
     from jedi.inference.gradual.typeshed import _try_to_load_stub_cached
     return _try_to_load_stub_cached(
-        module.evaluator,
+        module.infer_state,
         import_names=module.string_names,
         python_context_set=ContextSet([module]),
         parent_module_context=None,
-        sys_path=module.evaluator.get_sys_path(),
+        sys_path=module.infer_state.get_sys_path(),
     )


@@ -36,7 +36,7 @@ class StubModuleContext(ModuleContext):

     def _get_stub_filters(self, search_global, **filter_kwargs):
         return [StubFilter(
-            self.evaluator,
+            self.infer_state,
             context=self,
             search_global=search_global,
             **filter_kwargs

@@ -89,9 +89,9 @@ def _cache_stub_file_map(version_info):

 def import_module_decorator(func):
     @wraps(func)
-    def wrapper(evaluator, import_names, parent_module_context, sys_path, prefer_stubs):
+    def wrapper(infer_state, import_names, parent_module_context, sys_path, prefer_stubs):
         try:
-            python_context_set = evaluator.module_cache.get(import_names)
+            python_context_set = infer_state.module_cache.get(import_names)
         except KeyError:
             if parent_module_context is not None and parent_module_context.is_stub():
                 parent_module_contexts = parent_module_context.non_stub_context_set
@@ -104,19 +104,19 @@ def import_module_decorator(func):
                 # ``os``.
                 python_parent = next(iter(parent_module_contexts))
                 if python_parent is None:
-                    python_parent, = evaluator.import_module(('os',), prefer_stubs=False)
+                    python_parent, = infer_state.import_module(('os',), prefer_stubs=False)
                 python_context_set = python_parent.py__getattribute__('path')
             else:
                 python_context_set = ContextSet.from_sets(
-                    func(evaluator, import_names, p, sys_path,)
+                    func(infer_state, import_names, p, sys_path,)
                     for p in parent_module_contexts
                 )
-            evaluator.module_cache.add(import_names, python_context_set)
+            infer_state.module_cache.add(import_names, python_context_set)

         if not prefer_stubs:
             return python_context_set

-        stub = _try_to_load_stub_cached(evaluator, import_names, python_context_set,
+        stub = _try_to_load_stub_cached(infer_state, import_names, python_context_set,
                                         parent_module_context, sys_path)
         if stub is not None:
             return ContextSet([stub])
@@ -125,21 +125,21 @@ def import_module_decorator(func):
     return wrapper


-def _try_to_load_stub_cached(evaluator, import_names, *args, **kwargs):
+def _try_to_load_stub_cached(infer_state, import_names, *args, **kwargs):
     try:
-        return evaluator.stub_module_cache[import_names]
+        return infer_state.stub_module_cache[import_names]
     except KeyError:
         pass

     # TODO is this needed? where are the exceptions coming from that make this
     # necessary? Just remove this line.
-    evaluator.stub_module_cache[import_names] = None
-    evaluator.stub_module_cache[import_names] = result = \
-        _try_to_load_stub(evaluator, import_names, *args, **kwargs)
+    infer_state.stub_module_cache[import_names] = None
+    infer_state.stub_module_cache[import_names] = result = \
+        _try_to_load_stub(infer_state, import_names, *args, **kwargs)
     return result


-def _try_to_load_stub(evaluator, import_names, python_context_set,
+def _try_to_load_stub(infer_state, import_names, python_context_set,
                       parent_module_context, sys_path):
     """
     Trying to load a stub for a set of import_names.
@@ -150,7 +150,7 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,
     if parent_module_context is None and len(import_names) > 1:
         try:
             parent_module_context = _try_to_load_stub_cached(
-                evaluator, import_names[:-1], NO_CONTEXTS,
+                infer_state, import_names[:-1], NO_CONTEXTS,
                 parent_module_context=None, sys_path=sys_path)
         except KeyError:
             pass
@@ -161,7 +161,7 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,
     for p in sys_path:
         init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
         m = _try_to_load_stub_from_file(
-            evaluator,
+            infer_state,
             python_context_set,
             file_io=FileIO(init),
             import_names=import_names,
@@ -185,7 +185,7 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,

         for file_path in file_paths:
             m = _try_to_load_stub_from_file(
-                evaluator,
+                infer_state,
                 python_context_set,
                 # The file path should end with .pyi
                 file_io=FileIO(file_path),
@@ -195,7 +195,7 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,
                 return m

     # 3. Try to load typeshed
-    m = _load_from_typeshed(evaluator, python_context_set, parent_module_context, import_names)
+    m = _load_from_typeshed(infer_state, python_context_set, parent_module_context, import_names)
     if m is not None:
         return m

@@ -216,7 +216,7 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,

     for p in check_path:
         m = _try_to_load_stub_from_file(
-            evaluator,
+            infer_state,
             python_context_set,
             file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'),
             import_names=import_names,
@@ -229,11 +229,11 @@ def _try_to_load_stub(evaluator, import_names, python_context_set,
     return None


-def _load_from_typeshed(evaluator, python_context_set, parent_module_context, import_names):
+def _load_from_typeshed(infer_state, python_context_set, parent_module_context, import_names):
     import_name = import_names[-1]
     map_ = None
     if len(import_names) == 1:
-        map_ = _cache_stub_file_map(evaluator.grammar.version_info)
+        map_ = _cache_stub_file_map(infer_state.grammar.version_info)
         import_name = _IMPORT_MAP.get(import_name, import_name)
     elif isinstance(parent_module_context, StubModuleContext):
         if not parent_module_context.is_package:
@@ -247,16 +247,16 @@ def _load_from_typeshed(evaluator, python_context_set, parent_module_context, im
     path = map_.get(import_name)
     if path is not None:
         return _try_to_load_stub_from_file(
-            evaluator,
+            infer_state,
             python_context_set,
             file_io=FileIO(path),
             import_names=import_names,
         )


-def _try_to_load_stub_from_file(evaluator, python_context_set, file_io, import_names):
+def _try_to_load_stub_from_file(infer_state, python_context_set, file_io, import_names):
     try:
-        stub_module_node = evaluator.parse(
+        stub_module_node = infer_state.parse(
             file_io=file_io,
             cache=True,
             use_latest_grammar=True
@@ -266,24 +266,24 @@ def _try_to_load_stub_from_file(evaluator, python_context_set, file_io, import_n
         return None
     else:
         return create_stub_module(
-            evaluator, python_context_set, stub_module_node, file_io,
+            infer_state, python_context_set, stub_module_node, file_io,
             import_names
         )


-def create_stub_module(evaluator, python_context_set, stub_module_node, file_io, import_names):
+def create_stub_module(infer_state, python_context_set, stub_module_node, file_io, import_names):
     if import_names == ('typing',):
         module_cls = TypingModuleWrapper
     else:
         module_cls = StubModuleContext
     file_name = os.path.basename(file_io.path)
     stub_module_context = module_cls(
-        python_context_set, evaluator, stub_module_node,
+        python_context_set, infer_state, stub_module_node,
         file_io=file_io,
         string_names=import_names,
         # The code was loaded with latest_grammar, so use
         # that.
-        code_lines=get_cached_code_lines(evaluator.latest_grammar, file_io.path),
+        code_lines=get_cached_code_lines(infer_state.latest_grammar, file_io.path),
         is_package=file_name == '__init__.pyi',
     )
     return stub_module_context

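``_try_to_load_stub_cached`` stores ``None`` under the key before computing, so a recursive lookup of the same import during stub loading sees "no stub yet" instead of recursing forever. The guard in isolation, sketched over a plain dict (assumed cache shape)::

    def cached(cache, key, compute):
        try:
            return cache[key]
        except KeyError:
            pass
        cache[key] = None              # breaks import cycles while computing
        cache[key] = result = compute()
        return result
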
@@ -7,7 +7,7 @@ This file deals with all the typing.py cases.
 """
 from jedi._compatibility import unicode, force_unicode
 from jedi import debug
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.compiled import builtin_from_name
 from jedi.inference.base_context import ContextSet, NO_CONTEXTS, Context, \
     iterator_to_context_set, ContextWrapper, LazyContextWrapper
@@ -45,8 +45,8 @@ class TypingName(AbstractTreeName):


 class _BaseTypingContext(Context):
-    def __init__(self, evaluator, parent_context, tree_name):
-        super(_BaseTypingContext, self).__init__(evaluator, parent_context)
+    def __init__(self, infer_state, parent_context, tree_name):
+        super(_BaseTypingContext, self).__init__(infer_state, parent_context)
         self._tree_name = tree_name

     @property
@@ -71,7 +71,7 @@ class _BaseTypingContext(Context):
         # TODO this is obviously not correct, but at least gives us a class if
         # we have none. Some of these objects don't really have a base class in
         # typeshed.
-        return builtin_from_name(self.evaluator, u'object')
+        return builtin_from_name(self.infer_state, u'object')

     @property
     def name(self):
@@ -87,39 +87,39 @@ class TypingModuleName(NameWrapper):

     def _remap(self):
         name = self.string_name
-        evaluator = self.parent_context.evaluator
+        infer_state = self.parent_context.infer_state
         try:
             actual = _TYPE_ALIAS_TYPES[name]
         except KeyError:
             pass
         else:
-            yield TypeAlias.create_cached(evaluator, self.parent_context, self.tree_name, actual)
+            yield TypeAlias.create_cached(infer_state, self.parent_context, self.tree_name, actual)
             return

         if name in _PROXY_CLASS_TYPES:
-            yield TypingClassContext.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield TypingClassContext.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name in _PROXY_TYPES:
-            yield TypingContext.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield TypingContext.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'runtime':
             # We don't want anything here, not sure what this function is
             # supposed to do, since it just appears in the stubs and shouldn't
             # have any effects there (because it's never executed).
             return
         elif name == 'TypeVar':
-            yield TypeVarClass.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield TypeVarClass.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'Any':
-            yield Any.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield Any.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'TYPE_CHECKING':
             # This is needed for e.g. imports that are only available for type
             # checking or are in cycles. The user can then check this variable.
-            yield builtin_from_name(evaluator, u'True')
+            yield builtin_from_name(infer_state, u'True')
         elif name == 'overload':
-            yield OverloadFunction.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield OverloadFunction.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'NewType':
-            yield NewTypeFunction.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield NewTypeFunction.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'cast':
             # TODO implement cast
-            yield CastFunction.create_cached(evaluator, self.parent_context, self.tree_name)
+            yield CastFunction.create_cached(infer_state, self.parent_context, self.tree_name)
         elif name == 'TypedDict':
             # TODO doesn't even exist in typeshed/typing.py, yet. But will be
             # added soon.
@@ -139,8 +139,8 @@ class TypingModuleFilterWrapper(FilterWrapper):


 class _WithIndexBase(_BaseTypingContext):
-    def __init__(self, evaluator, parent_context, name, index_context, context_of_index):
-        super(_WithIndexBase, self).__init__(evaluator, parent_context, name)
+    def __init__(self, infer_state, parent_context, name, index_context, context_of_index):
+        super(_WithIndexBase, self).__init__(infer_state, parent_context, name)
         self._index_context = index_context
         self._context_of_index = context_of_index

@@ -164,7 +164,7 @@ class TypingContextWithIndex(_WithIndexBase):
             # Optional is basically just saying it's either None or the actual
             # type.
             return self.gather_annotation_classes().execute_annotation() \
-                | ContextSet([builtin_from_name(self.evaluator, u'None')])
+                | ContextSet([builtin_from_name(self.infer_state, u'None')])
         elif string_name == 'Type':
             # The type is actually already given in the index_context
             return ContextSet([self._index_context])
@@ -174,7 +174,7 @@ class TypingContextWithIndex(_WithIndexBase):

         cls = globals()[string_name]
         return ContextSet([cls(
-            self.evaluator,
+            self.infer_state,
             self.parent_context,
             self._tree_name,
             self._index_context,
@@ -194,7 +194,7 @@ class TypingContext(_BaseTypingContext):
     def py__getitem__(self, index_context_set, contextualized_node):
         return ContextSet(
             self.index_class.create_cached(
-                self.evaluator,
+                self.infer_state,
                 self.parent_context,
                 self._tree_name,
                 index_context,
@@ -206,7 +206,7 @@ class TypingContext(_BaseTypingContext):
 class _TypingClassMixin(object):
     def py__bases__(self):
         return [LazyKnownContexts(
-            self.evaluator.builtins_module.py__getattribute__('object')
+            self.infer_state.builtins_module.py__getattribute__('object')
         )]

     def get_metaclasses(self):
@@ -246,7 +246,7 @@ def _iter_over_arguments(maybe_tuple_context, defining_context):

 class TypeAlias(LazyContextWrapper):
     def __init__(self, parent_context, origin_tree_name, actual):
-        self.evaluator = parent_context.evaluator
+        self.infer_state = parent_context.infer_state
         self.parent_context = parent_context
         self._origin_tree_name = origin_tree_name
         self._actual = actual  # e.g. builtins.list
@@ -263,13 +263,13 @@ class TypeAlias(LazyContextWrapper):

     def _get_wrapped_context(self):
         module_name, class_name = self._actual.split('.')
-        if self.evaluator.environment.version_info.major == 2 and module_name == 'builtins':
+        if self.infer_state.environment.version_info.major == 2 and module_name == 'builtins':
             module_name = '__builtin__'

-        # TODO use evaluator.import_module?
+        # TODO use infer_state.import_module?
         from jedi.inference.imports import Importer
         module, = Importer(
-            self.evaluator, [module_name], self.evaluator.builtins_module
+            self.infer_state, [module_name], self.infer_state.builtins_module
         ).follow()
         classes = module.py__getattribute__(class_name)
         # There should only be one, because it's code that we control.
@@ -358,7 +358,7 @@ class TypeVarClass(_BaseTypingContext):
             return NO_CONTEXTS

         return ContextSet([TypeVar.create_cached(
-            self.evaluator,
+            self.infer_state,
             self.parent_context,
             self._tree_name,
             var_name,
@@ -382,7 +382,7 @@ class TypeVarClass(_BaseTypingContext):
             return None
         else:
             safe_value = method(default=None)
-            if self.evaluator.environment.version_info.major == 2:
+            if self.infer_state.environment.version_info.major == 2:
                 if isinstance(safe_value, bytes):
                     return force_unicode(safe_value)
             if isinstance(safe_value, (str, unicode)):
@@ -391,8 +391,8 @@ class TypeVarClass(_BaseTypingContext):


 class TypeVar(_BaseTypingContext):
-    def __init__(self, evaluator, parent_context, tree_name, var_name, unpacked_args):
-        super(TypeVar, self).__init__(evaluator, parent_context, tree_name)
+    def __init__(self, infer_state, parent_context, tree_name, var_name, unpacked_args):
+        super(TypeVar, self).__init__(infer_state, parent_context, tree_name)
         self._var_name = var_name

         self._constraints_lazy_contexts = []
@@ -469,7 +469,7 @@ class NewTypeFunction(_BaseTypingContext):
             return NO_CONTEXTS
         return ContextSet(
             NewType(
-                self.evaluator,
+                self.infer_state,
                 contextualized_node.context,
                 contextualized_node.node,
                 second_arg.infer(),
@@ -477,8 +477,8 @@ class NewTypeFunction(_BaseTypingContext):


 class NewType(Context):
-    def __init__(self, evaluator, parent_context, tree_node, type_context_set):
-        super(NewType, self).__init__(evaluator, parent_context)
+    def __init__(self, infer_state, parent_context, tree_node, type_context_set):
+        super(NewType, self).__init__(infer_state, parent_context)
         self._type_context_set = type_context_set
         self.tree_node = tree_node

@@ -643,7 +643,7 @@ class LazyGenericClass(AbstractAnnotatedClass):
         self._index_context = index_context
         self._context_of_index = context_of_index

-    @evaluator_method_cache()
+    @infer_state_method_cache()
     def get_generics(self):
         return list(_iter_over_arguments(self._index_context, self._context_of_index))

@@ -668,7 +668,7 @@ class LazyAnnotatedBaseClass(object):
             if isinstance(base, AbstractAnnotatedClass):
                 # Here we have to recalculate the given types.
                 yield GenericClass.create_cached(
-                    base.evaluator,
+                    base.infer_state,
                     base._wrapped_context,
                     tuple(self._remap_type_vars(base)),
                 )
@@ -703,5 +703,5 @@ class InstanceWrapper(ContextWrapper):
             except IndexError:
                 pass
         elif cls.py__name__() == 'Iterator':
-            return ContextSet([builtin_from_name(self.evaluator, u'None')])
+            return ContextSet([builtin_from_name(self.infer_state, u'None')])
         return self._wrapped_context.py__stop_iteration_returns()

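Because ``TypingModuleName`` maps ``TYPE_CHECKING`` to the builtin ``True``, imports guarded in the standard PEP 484 way still resolve during inference even though they never run::

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from collections import OrderedDict   # imported only for annotations

    def first_key(d: 'OrderedDict') -> object:
        return next(iter(d))
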
@@ -3,7 +3,7 @@ import os
 from jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module


-def load_proper_stub_module(evaluator, file_io, import_names, module_node):
+def load_proper_stub_module(infer_state, file_io, import_names, module_node):
     """
     This function is given a random .pyi file and should return the proper
     module.
@@ -20,13 +20,13 @@ def load_proper_stub_module(evaluator, file_io, import_names, module_node):
             import_names = import_names[:-1]

     if import_names is not None:
-        actual_context_set = evaluator.import_module(import_names, prefer_stubs=False)
+        actual_context_set = infer_state.import_module(import_names, prefer_stubs=False)
         if not actual_context_set:
             return None

         stub = create_stub_module(
-            evaluator, actual_context_set, module_node, file_io, import_names
+            infer_state, actual_context_set, module_node, file_io, import_names
         )
-        evaluator.stub_module_cache[import_names] = stub
+        infer_state.stub_module_cache[import_names] = stub
         return stub
     return None

@@ -66,7 +66,7 @@ def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
     trailer = leaf.parent
     if trailer.type == 'fstring':
         from jedi.inference import compiled
-        return compiled.get_string_context_set(context.evaluator)
+        return compiled.get_string_context_set(context.infer_state)

     # The leaf may not be the last or first child, because there exist three
     # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
@@ -195,7 +195,7 @@ def predefine_names(context, flow_scope, dct):


 def is_string(context):
-    if context.evaluator.environment.version_info.major == 2:
+    if context.infer_state.environment.version_info.major == 2:
         str_classes = (unicode, bytes)
     else:
         str_classes = (unicode,)
@@ -265,5 +265,5 @@ def parse_dotted_names(nodes, is_import_from, until_node=None):
     return level, names


-def contexts_from_qualified_names(evaluator, *names):
-    return evaluator.import_module(names[:-1]).py__getattribute__(names[-1])
+def contexts_from_qualified_names(infer_state, *names):
+    return infer_state.import_module(names[:-1]).py__getattribute__(names[-1])

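``contexts_from_qualified_names`` resolves a dotted path inside the inference engine, e.g. ``(u'types', u'ModuleType')`` in the module diff further up. The runtime equivalent, shown for comparison (stdlib only, not jedi code)::

    import importlib

    def object_from_qualified_names(*names):
        module = importlib.import_module('.'.join(names[:-1]))
        return getattr(module, names[-1])

    object_from_qualified_names('types', 'ModuleType')   # <class 'module'>
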
@@ -28,7 +28,7 @@ from jedi.inference import helpers
 from jedi.inference import compiled
 from jedi.inference import analysis
 from jedi.inference.utils import unite
-from jedi.inference.cache import evaluator_method_cache
+from jedi.inference.cache import infer_state_method_cache
 from jedi.inference.names import ImportName, SubModuleName
 from jedi.inference.base_context import ContextSet, NO_CONTEXTS
 from jedi.inference.gradual.typeshed import import_module_decorator
@@ -56,13 +56,13 @@ class ModuleCache(object):

 # This memoization is needed, because otherwise we will infinitely loop on
 # certain imports.
-@evaluator_method_cache(default=NO_CONTEXTS)
+@infer_state_method_cache(default=NO_CONTEXTS)
 def infer_import(context, tree_name, is_goto=False):
     module_context = context.get_root_context()
     import_node = search_ancestor(tree_name, 'import_name', 'import_from')
     import_path = import_node.get_path_for_name(tree_name)
     from_import_name = None
-    evaluator = context.evaluator
+    infer_state = context.infer_state
     try:
         from_names = import_node.get_from_names()
     except AttributeError:
@@ -75,7 +75,7 @@ def infer_import(context, tree_name, is_goto=False):
         from_import_name = import_path[-1]
         import_path = from_names

-    importer = Importer(evaluator, tuple(import_path),
+    importer = Importer(infer_state, tuple(import_path),
                         module_context, import_node.level)

     types = importer.follow()
@@ -101,7 +101,7 @@ def infer_import(context, tree_name, is_goto=False):

         if not types:
             path = import_path + [from_import_name]
-            importer = Importer(evaluator, tuple(path),
+            importer = Importer(infer_state, tuple(path),
                                 module_context, import_node.level)
             types = importer.follow()
             # goto only accepts `Name`

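The ``module_cache`` used by ``infer_import`` and the loaders below acts like a per-``InferState`` ``sys.modules``: one entry per import path, shared by every consumer. A minimal sketch of the interface the calls in this file rely on (``get`` and ``add`` are the assumed methods; the ``get_from_path`` lookup seen later would need a second index keyed by file path)::

    class ModuleCache(object):
        """Sketch: maps import-name tuples to inferred module sets."""
        def __init__(self):
            self._name_cache = {}

        def add(self, import_names, context_set):
            self._name_cache[import_names] = context_set

        def get(self, import_names):
            return self._name_cache[import_names]   # KeyError if missing
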
@@ -183,7 +183,7 @@ def _level_to_base_import_path(project_path, directory, level):
|
||||
|
||||
|
||||
class Importer(object):
|
||||
def __init__(self, evaluator, import_path, module_context, level=0):
|
||||
def __init__(self, infer_state, import_path, module_context, level=0):
|
||||
"""
|
||||
An implementation similar to ``__import__``. Use `follow`
|
||||
to actually follow the imports.
|
||||
@@ -197,7 +197,7 @@ class Importer(object):
|
||||
:param import_path: List of namespaces (strings or Names).
|
||||
"""
|
||||
debug.speed('import %s %s' % (import_path, module_context))
|
||||
self._evaluator = evaluator
|
||||
self._infer_state = infer_state
|
||||
self.level = level
|
||||
self.module_context = module_context
|
||||
|
||||
@@ -233,7 +233,7 @@ class Importer(object):
|
||||
directory = os.path.dirname(path)
|
||||
|
||||
base_import_path, base_directory = _level_to_base_import_path(
|
||||
self._evaluator.project._path, directory, level,
|
||||
self._infer_state.project._path, directory, level,
|
||||
)
|
||||
if base_directory is None:
|
||||
# Everything is lost, the relative import does point
|
||||
@@ -265,11 +265,11 @@ class Importer(object):
|
||||
return self._fixed_sys_path
|
||||
|
||||
sys_path_mod = (
|
||||
self._evaluator.get_sys_path()
|
||||
self._infer_state.get_sys_path()
|
||||
+ sys_path.check_sys_path_modifications(self.module_context)
|
||||
)
|
||||
|
||||
if self._evaluator.environment.version_info.major == 2:
|
||||
if self._infer_state.environment.version_info.major == 2:
|
||||
file_path = self.module_context.py__file__()
|
||||
if file_path is not None:
|
||||
# Python2 uses an old strange way of importing relative imports.
|
||||
@@ -290,7 +290,7 @@ class Importer(object):
|
||||
context_set = [None]
|
||||
for i, name in enumerate(self.import_path):
|
||||
context_set = ContextSet.from_sets([
|
||||
self._evaluator.import_module(
|
||||
self._infer_state.import_module(
|
||||
import_names[:i+1],
|
||||
parent_module_context,
|
||||
sys_path
|
||||
@@ -311,12 +311,12 @@ class Importer(object):
|
||||
# add builtin module names
|
||||
if search_path is None and in_module is None:
|
||||
names += [ImportName(self.module_context, name)
|
||||
for name in self._evaluator.compiled_subprocess.get_builtin_module_names()]
|
||||
for name in self._infer_state.compiled_subprocess.get_builtin_module_names()]
|
||||
|
||||
if search_path is None:
|
||||
search_path = self._sys_path_with_modifications()
|
||||
|
||||
for name in iter_module_names(self._evaluator, search_path):
|
||||
for name in iter_module_names(self._infer_state, search_path):
|
||||
if in_module is None:
|
||||
n = ImportName(self.module_context, name)
|
||||
else:
|
||||
@@ -324,7 +324,7 @@ class Importer(object):
|
||||
names.append(n)
|
||||
return names
|
||||
|
||||
def completion_names(self, evaluator, only_modules=False):
|
||||
def completion_names(self, infer_state, only_modules=False):
|
||||
"""
|
||||
:param only_modules: Indicates wheter it's possible to import a
|
||||
definition that is not defined in a module.
|
||||
@@ -374,12 +374,12 @@ class Importer(object):

@plugin_manager.decorate()
@import_module_decorator
def import_module(evaluator, import_names, parent_module_context, sys_path):
def import_module(infer_state, import_names, parent_module_context, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
if import_names[0] in settings.auto_import_modules:
module = _load_builtin_module(evaluator, import_names, sys_path)
module = _load_builtin_module(infer_state, import_names, sys_path)
if module is None:
return NO_CONTEXTS
return ContextSet([module])
@@ -388,7 +388,7 @@ def import_module(evaluator, import_names, parent_module_context, sys_path):
if parent_module_context is None:
# Override the sys.path. It works only good that way.
# Injecting the path directly into `find_module` did not work.
file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
file_io_or_ns, is_pkg = infer_state.compiled_subprocess.get_module_info(
string=import_names[-1],
full_name=module_name,
sys_path=sys_path,
@@ -409,7 +409,7 @@ def import_module(evaluator, import_names, parent_module_context, sys_path):
# not important to be correct.
if not isinstance(path, list):
path = [path]
file_io_or_ns, is_pkg = evaluator.compiled_subprocess.get_module_info(
file_io_or_ns, is_pkg = infer_state.compiled_subprocess.get_module_info(
string=import_names[-1],
path=path,
full_name=module_name,
@@ -423,17 +423,17 @@ def import_module(evaluator, import_names, parent_module_context, sys_path):
if isinstance(file_io_or_ns, ImplicitNSInfo):
from jedi.inference.context.namespace import ImplicitNamespaceContext
module = ImplicitNamespaceContext(
evaluator,
infer_state,
fullname=file_io_or_ns.name,
paths=file_io_or_ns.paths,
)
elif file_io_or_ns is None:
module = _load_builtin_module(evaluator, import_names, sys_path)
module = _load_builtin_module(infer_state, import_names, sys_path)
if module is None:
return NO_CONTEXTS
else:
module = _load_python_module(
evaluator, file_io_or_ns, sys_path,
infer_state, file_io_or_ns, sys_path,
import_names=import_names,
is_package=is_pkg,
)
@@ -445,14 +445,14 @@ def import_module(evaluator, import_names, parent_module_context, sys_path):
return ContextSet([module])


def _load_python_module(evaluator, file_io, sys_path=None,
def _load_python_module(infer_state, file_io, sys_path=None,
import_names=None, is_package=False):
try:
return evaluator.module_cache.get_from_path(file_io.path)
return infer_state.module_cache.get_from_path(file_io.path)
except KeyError:
pass

module_node = evaluator.parse(
module_node = infer_state.parse(
file_io=file_io,
cache=True,
diff_cache=settings.fast_parser,
@@ -461,21 +461,21 @@ def _load_python_module(evaluator, file_io, sys_path=None,

from jedi.inference.context import ModuleContext
return ModuleContext(
evaluator, module_node,
infer_state, module_node,
file_io=file_io,
string_names=import_names,
code_lines=get_cached_code_lines(evaluator.grammar, file_io.path),
code_lines=get_cached_code_lines(infer_state.grammar, file_io.path),
is_package=is_package,
)


def _load_builtin_module(evaluator, import_names=None, sys_path=None):
def _load_builtin_module(infer_state, import_names=None, sys_path=None):
if sys_path is None:
sys_path = evaluator.get_sys_path()
sys_path = infer_state.get_sys_path()

dotted_name = '.'.join(import_names)
assert dotted_name is not None
module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)
module = compiled.load_module(infer_state, dotted_name=dotted_name, sys_path=sys_path)
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
@@ -483,13 +483,13 @@ def _load_builtin_module(evaluator, import_names=None, sys_path=None):
return module


def _load_module_from_path(evaluator, file_io, base_names):
def _load_module_from_path(infer_state, file_io, base_names):
"""
This should pretty much only be used for get_modules_containing_name. It's
here to ensure that a random path is still properly loaded into the Jedi
module structure.
"""
e_sys_path = evaluator.get_sys_path()
e_sys_path = infer_state.get_sys_path()
path = file_io.path
if base_names:
module_name = os.path.basename(path)
@@ -503,16 +503,16 @@ def _load_module_from_path(evaluator, file_io, base_names):
import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)

module = _load_python_module(
evaluator, file_io,
infer_state, file_io,
sys_path=e_sys_path,
import_names=import_names,
is_package=is_package,
)
evaluator.module_cache.add(import_names, ContextSet([module]))
infer_state.module_cache.add(import_names, ContextSet([module]))
return module


def get_modules_containing_name(evaluator, modules, name):
def get_modules_containing_name(infer_state, modules, name):
"""
Search a name in the directories of modules.
"""
@@ -530,7 +530,7 @@ def get_modules_containing_name(evaluator, modules, name):
if name not in code:
return None
new_file_io = KnownContentFileIO(file_io.path, code)
m = _load_module_from_path(evaluator, new_file_io, base_names)
m = _load_module_from_path(infer_state, new_file_io, base_names)
if isinstance(m, compiled.CompiledObject):
return None
return m

@@ -66,10 +66,10 @@ class AbstractArbitraryName(AbstractNameDefinition):
"""
is_context_name = False

def __init__(self, evaluator, string):
self.evaluator = evaluator
def __init__(self, infer_state, string):
self.infer_state = infer_state
self.string_name = string
self.parent_context = evaluator.builtins_module
self.parent_context = infer_state.builtins_module

def infer(self):
return NO_CONTEXTS
@@ -103,7 +103,7 @@ class AbstractTreeName(AbstractNameDefinition):
return parent_names + (self.tree_name.value,)

def goto(self, **kwargs):
return self.parent_context.evaluator.goto(self.parent_context, self.tree_name, **kwargs)
return self.parent_context.infer_state.goto(self.parent_context, self.tree_name, **kwargs)

def is_import(self):
imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
@@ -157,7 +157,7 @@ class TreeNameDefinition(AbstractTreeName):
# Refactor this, should probably be here.
from jedi.inference.syntax_tree import tree_name_to_contexts
parent = self.parent_context
return tree_name_to_contexts(parent.evaluator, parent, self.tree_name)
return tree_name_to_contexts(parent.infer_state, parent, self.tree_name)

@property
def api_type(self):
@@ -346,7 +346,7 @@ class ImportName(AbstractNameDefinition):
def infer(self):
from jedi.inference.imports import Importer
m = self._from_module_context
return Importer(m.evaluator, [self.string_name], m, level=self._level).follow()
return Importer(m.infer_state, [self.string_name], m, level=self._level).follow()

def goto(self):
return [m.name for m in self.infer()]

@@ -145,13 +145,13 @@ def get_executed_params_and_issues(execution_context, arguments):
var_arg_iterator.push_back((key, argument))
break
lazy_context_list.append(argument)
seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list)
seq = iterable.FakeSequence(execution_context.infer_state, u'tuple', lazy_context_list)
result_arg = LazyKnownContext(seq)
elif param.star_count == 2:
if argument is not None:
too_many_args(argument)
# **kwargs param
dct = iterable.FakeDict(execution_context.evaluator, dict(non_matching_keys))
dct = iterable.FakeDict(execution_context.infer_state, dict(non_matching_keys))
result_arg = LazyKnownContext(dct)
non_matching_keys = {}
else:
@@ -235,11 +235,11 @@ def _error_argument_count(funcdef, actual_count):
def _create_default_param(execution_context, param):
if param.star_count == 1:
result_arg = LazyKnownContext(
iterable.FakeSequence(execution_context.evaluator, u'tuple', [])
iterable.FakeSequence(execution_context.infer_state, u'tuple', [])
)
elif param.star_count == 2:
result_arg = LazyKnownContext(
iterable.FakeDict(execution_context.evaluator, {})
iterable.FakeDict(execution_context.infer_state, {})
)
elif param.default is None:
result_arg = LazyUnknownContext()

@@ -1,6 +1,6 @@
from jedi.inference.cache import evaluator_function_cache
from jedi.inference.cache import infer_state_function_cache


@evaluator_function_cache()
def get_yield_exprs(evaluator, funcdef):
@infer_state_function_cache()
def get_yield_exprs(infer_state, funcdef):
return list(funcdef.iter_yield_exprs())

@@ -56,12 +56,12 @@ class RecursionDetector(object):


@contextmanager
def execution_allowed(evaluator, node):
def execution_allowed(infer_state, node):
"""
A decorator to detect recursions in statements. In a recursion a statement
at the same place, in the same module may not be executed two times.
"""
pushed_nodes = evaluator.recursion_detector.pushed_nodes
pushed_nodes = infer_state.recursion_detector.pushed_nodes

if node in pushed_nodes:
debug.warning('catched stmt recursion: %s @%s', node,
@@ -78,7 +78,7 @@ def execution_allowed(evaluator, node):
def execution_recursion_decorator(default=NO_CONTEXTS):
def decorator(func):
def wrapper(self, **kwargs):
detector = self.evaluator.execution_recursion_detector
detector = self.infer_state.execution_recursion_detector
limit_reached = detector.push_execution(self)
try:
if limit_reached:
@@ -96,8 +96,8 @@ class ExecutionRecursionDetector(object):
"""
Catches recursions of executions.
"""
def __init__(self, evaluator):
self._evaluator = evaluator
def __init__(self, infer_state):
self._infer_state = infer_state

self._recursion_level = 0
self._parent_execution_funcs = []
@@ -117,7 +117,7 @@ class ExecutionRecursionDetector(object):

module = execution.get_root_context()

if module == self._evaluator.builtins_module:
if module == self._infer_state.builtins_module:
# We have control over builtins so we know they are not recursing
# like crazy. Therefore we just let them execute always, because
# they usually just help a lot with getting good results.

@@ -40,7 +40,7 @@ def _iter_nodes_for_param(param_name):
contexts = _to_callables(context, trailer)

args = TreeArguments.create_cached(
execution_context.evaluator,
execution_context.infer_state,
context=context,
argument_node=trailer.children[1],
trailer=trailer,
@@ -66,7 +66,7 @@ def _to_callables(context, trailer):

atom_expr = trailer.parent
index = atom_expr.children[0] == 'await'
# Eval atom first
# Infer atom first
contexts = context.infer_node(atom_expr.children[index])
for trailer2 in atom_expr.children[index + 1:]:
if trailer == trailer2:

@@ -23,7 +23,7 @@ from jedi.inference.context import TreeInstance
from jedi.inference.finder import NameFinder
from jedi.inference.helpers import is_string, is_literal, is_number
from jedi.inference.compiled.access import COMPARISON_OPERATORS
from jedi.inference.cache import evaluator_method_cache
from jedi.inference.cache import infer_state_method_cache
from jedi.inference.gradual.stub_context import VersionInfo
from jedi.inference.gradual import annotation
from jedi.inference.context.decorator import Decoratee
@@ -41,14 +41,14 @@ def _limit_context_infers(func):
"""
def wrapper(context, *args, **kwargs):
n = context.tree_node
evaluator = context.evaluator
infer_state = context.infer_state
try:
evaluator.inferred_element_counts[n] += 1
if evaluator.inferred_element_counts[n] > 300:
infer_state.inferred_element_counts[n] += 1
if infer_state.inferred_element_counts[n] > 300:
debug.warning('In context %s there were too many inferences.', n)
return NO_CONTEXTS
except KeyError:
evaluator.inferred_element_counts[n] = 1
infer_state.inferred_element_counts[n] = 1
return func(context, *args, **kwargs)

return wrapper
@@ -70,7 +70,7 @@ def _py__stop_iteration_returns(generators):
@_limit_context_infers
def infer_node(context, element):
debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
evaluator = context.evaluator
infer_state = context.infer_state
typ = element.type
if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
return infer_atom(context, element)
@@ -91,7 +91,7 @@ def infer_node(context, element):
if trailer == '**': # has a power operation.
right = context.infer_node(children[i + 1])
context_set = _infer_comparison(
evaluator,
infer_state,
context,
context_set,
trailer,
@@ -105,7 +105,7 @@ def infer_node(context, element):
return context_set
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
return ContextSet([iterable.SequenceLiteralContext(evaluator, context, element)])
return ContextSet([iterable.SequenceLiteralContext(infer_state, context, element)])
elif typ in ('not_test', 'factor'):
context_set = context.infer_node(element.children[-1])
for operator in element.children[:-1]:
@@ -122,7 +122,7 @@ def infer_node(context, element):
if element.value not in ('.', '...'):
origin = element.parent
raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
return ContextSet([compiled.builtin_from_name(evaluator, u'Ellipsis')])
return ContextSet([compiled.builtin_from_name(infer_state, u'Ellipsis')])
elif typ == 'dotted_name':
context_set = infer_atom(context, element.children[0])
for next_name in element.children[2::2]:
@@ -158,7 +158,7 @@ def infer_trailer(context, atom_contexts, trailer):
if trailer_op == '[':
trailer_op, node, _ = trailer.children
return atom_contexts.get_item(
infer_subscript_list(context.evaluator, context, node),
infer_subscript_list(context.infer_state, context, node),
ContextualizedNode(context, trailer)
)
else:
@@ -170,7 +170,7 @@ def infer_trailer(context, atom_contexts, trailer):
)
else:
assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
args = arguments.TreeArguments(context.evaluator, context, node, trailer)
args = arguments.TreeArguments(context.infer_state, context, node, trailer)
return atom_contexts.execute(args)


@@ -183,7 +183,7 @@ def infer_atom(context, atom):
if atom.type == 'name':
if atom.value in ('True', 'False', 'None'):
# Python 2...
return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)])
return ContextSet([compiled.builtin_from_name(context.infer_state, atom.value)])

# This is the first global lookup.
stmt = tree.search_ancestor(
@@ -207,7 +207,7 @@ def infer_atom(context, atom):
elif atom.type == 'keyword':
# For False/True/None
if atom.value in ('False', 'True', 'None'):
return ContextSet([compiled.builtin_from_name(context.evaluator, atom.value)])
return ContextSet([compiled.builtin_from_name(context.infer_state, atom.value)])
elif atom.value == 'print':
# print e.g. could be inferred like this in Python 2.7
return NO_CONTEXTS
@@ -218,17 +218,17 @@ def infer_atom(context, atom):
assert False, 'Cannot infer the keyword %s' % atom

elif isinstance(atom, tree.Literal):
string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
return ContextSet([compiled.create_simple_object(context.evaluator, string)])
string = context.infer_state.compiled_subprocess.safe_literal_eval(atom.value)
return ContextSet([compiled.create_simple_object(context.infer_state, string)])
elif atom.type == 'strings':
# Will be multiple string.
context_set = infer_atom(context, atom.children[0])
for string in atom.children[1:]:
right = infer_atom(context, string)
context_set = _infer_comparison(context.evaluator, context, context_set, u'+', right)
context_set = _infer_comparison(context.infer_state, context, context_set, u'+', right)
return context_set
elif atom.type == 'fstring':
return compiled.get_string_context_set(context.evaluator)
return compiled.get_string_context_set(context.infer_state)
else:
c = atom.children
# Parentheses without commas are not tuples.
@@ -251,7 +251,7 @@ def infer_atom(context, atom):

if comp_for.type in ('comp_for', 'sync_comp_for'):
return ContextSet([iterable.comprehension_from_atom(
context.evaluator, context, atom
context.infer_state, context, atom
)])

# It's a dict/list/tuple literal.
@@ -262,19 +262,19 @@ def infer_atom(context, atom):
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c or
'**' in array_node_c):
context = iterable.DictLiteralContext(context.evaluator, context, atom)
context = iterable.DictLiteralContext(context.infer_state, context, atom)
else:
context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
context = iterable.SequenceLiteralContext(context.infer_state, context, atom)
return ContextSet([context])


@_limit_context_infers
def infer_expr_stmt(context, stmt, seek_name=None):
with recursion.execution_allowed(context.evaluator, stmt) as allowed:
with recursion.execution_allowed(context.infer_state, stmt) as allowed:
# Here we allow list/set to recurse under certain conditions. To make
# it possible to resolve stuff like list(set(list(x))), this is
# necessary.
if not allowed and context.get_root_context() == context.evaluator.builtins_module:
if not allowed and context.get_root_context() == context.infer_state.builtins_module:
try:
instance = context.var_args.instance
except AttributeError:
@@ -306,7 +306,7 @@ def _infer_expr_stmt(context, stmt, seek_name=None):

if seek_name:
c_node = ContextualizedName(context, seek_name)
context_set = check_tuple_assignments(context.evaluator, c_node, context_set)
context_set = check_tuple_assignments(context.infer_state, c_node, context_set)

first_operator = next(stmt.yield_operators(), None)
if first_operator not in ('=', None) and first_operator.type == 'operator':
@@ -331,10 +331,10 @@ def _infer_expr_stmt(context, stmt, seek_name=None):
dct = {for_stmt.children[1].value: lazy_context.infer()}
with helpers.predefine_names(context, for_stmt, dct):
t = context.infer_node(rhs)
left = _infer_comparison(context.evaluator, context, left, operator, t)
left = _infer_comparison(context.infer_state, context, left, operator, t)
context_set = left
else:
context_set = _infer_comparison(context.evaluator, context, left, operator, context_set)
context_set = _infer_comparison(context.infer_state, context, left, operator, context_set)
debug.dbg('infer_expr_stmt result %s', context_set)
return context_set

@@ -358,7 +358,7 @@ def infer_or_test(context, or_test):
types = context.infer_node(right)
# Otherwise continue, because of uncertainty.
else:
types = _infer_comparison(context.evaluator, context, types, operator,
types = _infer_comparison(context.infer_state, context, types, operator,
context.infer_node(right))
debug.dbg('infer_or_test types %s', types)
return types
@@ -377,12 +377,12 @@ def infer_factor(context_set, operator):
value = context.py__bool__()
if value is None: # Uncertainty.
return
yield compiled.create_simple_object(context.evaluator, not value)
yield compiled.create_simple_object(context.infer_state, not value)
else:
yield context


def _literals_to_types(evaluator, result):
def _literals_to_types(infer_state, result):
# Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
# int(), float(), etc).
new_result = NO_CONTEXTS
@@ -390,27 +390,27 @@ def _literals_to_types(evaluator, result):
if is_literal(typ):
# Literals are only valid as long as the operations are
# correct. Otherwise add a value-free instance.
cls = compiled.builtin_from_name(evaluator, typ.name.string_name)
cls = compiled.builtin_from_name(infer_state, typ.name.string_name)
new_result |= cls.execute_with_values()
else:
new_result |= ContextSet([typ])
return new_result


def _infer_comparison(evaluator, context, left_contexts, operator, right_contexts):
def _infer_comparison(infer_state, context, left_contexts, operator, right_contexts):
if not left_contexts or not right_contexts:
# illegal slices e.g. cause left/right_result to be None
result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
return _literals_to_types(evaluator, result)
return _literals_to_types(infer_state, result)
else:
# I don't think there's a reasonable chance that a string
# operation is still correct, once we pass something like six
# objects.
if len(left_contexts) * len(right_contexts) > 6:
return _literals_to_types(evaluator, left_contexts | right_contexts)
return _literals_to_types(infer_state, left_contexts | right_contexts)
else:
return ContextSet.from_sets(
_infer_comparison_part(evaluator, context, left, operator, right)
_infer_comparison_part(infer_state, context, left, operator, right)
for left in left_contexts
for right in right_contexts
)
@@ -440,8 +440,8 @@ def _is_list(context):
return isinstance(context, iterable.Sequence) and context.array_type == 'list'


def _bool_to_context(evaluator, bool_):
return compiled.builtin_from_name(evaluator, force_unicode(str(bool_)))
def _bool_to_context(infer_state, bool_):
return compiled.builtin_from_name(infer_state, force_unicode(str(bool_)))


def _get_tuple_ints(context):
@@ -461,7 +461,7 @@ def _get_tuple_ints(context):
return numbers


def _infer_comparison_part(evaluator, context, left, operator, right):
def _infer_comparison_part(infer_state, context, left, operator, right):
l_is_num = is_number(left)
r_is_num = is_number(right)
if isinstance(operator, unicode):
@@ -479,7 +479,7 @@ def _infer_comparison_part(evaluator, context, left, operator, right):
if l_is_num and r_is_num or is_string(left) and is_string(right):
return ContextSet([left.execute_operation(right, str_operator)])
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return ContextSet([iterable.MergedArray(evaluator, (left, right))])
return ContextSet([iterable.MergedArray(infer_state, (left, right))])
elif str_operator == '-':
if l_is_num and r_is_num:
return ContextSet([left.execute_operation(right, str_operator)])
@@ -499,18 +499,18 @@ def _infer_comparison_part(evaluator, context, left, operator, right):
if str_operator in ('is', '!=', '==', 'is not'):
operation = COMPARISON_OPERATORS[str_operator]
bool_ = operation(left, right)
return ContextSet([_bool_to_context(evaluator, bool_)])
return ContextSet([_bool_to_context(infer_state, bool_)])

if isinstance(left, VersionInfo):
version_info = _get_tuple_ints(right)
if version_info is not None:
bool_result = compiled.access.COMPARISON_OPERATORS[operator](
evaluator.environment.version_info,
infer_state.environment.version_info,
tuple(version_info)
)
return ContextSet([_bool_to_context(evaluator, bool_result)])
return ContextSet([_bool_to_context(infer_state, bool_result)])

return ContextSet([_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)])
return ContextSet([_bool_to_context(infer_state, True), _bool_to_context(infer_state, False)])
elif str_operator == 'in':
return NO_CONTEXTS

@@ -531,7 +531,7 @@ def _infer_comparison_part(evaluator, context, left, operator, right):
return result


def _remove_statements(evaluator, context, stmt, name):
def _remove_statements(infer_state, context, stmt, name):
"""
This is the part where statements are being stripped.

@@ -547,7 +547,7 @@ def _remove_statements(evaluator, context, stmt, name):


@plugin_manager.decorate()
def tree_name_to_contexts(evaluator, context, tree_name):
def tree_name_to_contexts(infer_state, context, tree_name):
context_set = NO_CONTEXTS
module_node = context.get_root_context().tree_node
# First check for annotations, like: `foo: int = 3`
@@ -570,15 +570,15 @@ def tree_name_to_contexts(evaluator, context, tree_name):
if node is None:
node = tree_name.parent
if node.type == 'global_stmt':
context = evaluator.create_context(context, tree_name)
finder = NameFinder(evaluator, context, context, tree_name.value)
context = infer_state.create_context(context, tree_name)
finder = NameFinder(infer_state, context, context, tree_name.value)
filters = finder.get_filters(search_global=True)
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filters = [next(filters)]
return finder.find(filters, attribute_lookup=False)
elif node.type not in ('import_from', 'import_name'):
context = evaluator.create_context(context, tree_name)
context = infer_state.create_context(context, tree_name)
return infer_atom(context, tree_name)

typ = node.type
@@ -602,9 +602,9 @@ def tree_name_to_contexts(evaluator, context, tree_name):
is_async=node.parent.type == 'async_stmt',
)
c_node = ContextualizedName(context, tree_name)
types = check_tuple_assignments(evaluator, c_node, for_types)
types = check_tuple_assignments(infer_state, c_node, for_types)
elif typ == 'expr_stmt':
types = _remove_statements(evaluator, context, node, tree_name)
types = _remove_statements(infer_state, context, node, tree_name)
elif typ == 'with_stmt':
context_managers = context.infer_node(node.get_test_node_from_name(tree_name))
enter_methods = context_managers.py__getattribute__(u'__enter__')
@@ -628,7 +628,7 @@ def tree_name_to_contexts(evaluator, context, tree_name):

# We don't want to have functions/classes that are created by the same
# tree_node.
@evaluator_method_cache()
@infer_state_method_cache()
def _apply_decorators(context, node):
"""
Returns the function, that should to be executed in the end.
@@ -636,7 +636,7 @@ def _apply_decorators(context, node):
"""
if node.type == 'classdef':
decoratee_context = ClassContext(
context.evaluator,
context.infer_state,
parent_context=context,
tree_node=node
)
@@ -674,7 +674,7 @@ def _apply_decorators(context, node):
return values


def check_tuple_assignments(evaluator, contextualized_name, context_set):
def check_tuple_assignments(infer_state, contextualized_name, context_set):
"""
Checks if tuples are assigned.
"""
@@ -698,7 +698,7 @@ def check_tuple_assignments(evaluator, contextualized_name, context_set):
return context_set


def infer_subscript_list(evaluator, context, index):
def infer_subscript_list(infer_state, context, index):
"""
Handles slices in subscript nodes.
"""
@@ -724,7 +724,7 @@ def infer_subscript_list(evaluator, context, index):

return ContextSet([iterable.Slice(context, *result)])
elif index.type == 'subscriptlist':
return ContextSet([iterable.SequenceLiteralContext(evaluator, context, index)])
return ContextSet([iterable.SequenceLiteralContext(infer_state, context, index)])

# No slices
return context.infer_node(index)

@@ -1,7 +1,7 @@
import os

from jedi._compatibility import unicode, force_unicode, all_suffixes
from jedi.inference.cache import evaluator_method_cache
from jedi.inference.cache import infer_state_method_cache
from jedi.inference.base_context import ContextualizedNode
from jedi.inference.helpers import is_string
from jedi.common.utils import traverse_parents
@@ -92,7 +92,7 @@ def _paths_from_list_modifications(module_context, trailer1, trailer2):
yield abs_path


@evaluator_method_cache(default=[])
@infer_state_method_cache(default=[])
def check_sys_path_modifications(module_context):
"""
Detect sys.path modifications within module.
@@ -130,20 +130,20 @@ def check_sys_path_modifications(module_context):
return added


def discover_buildout_paths(evaluator, script_path):
def discover_buildout_paths(infer_state, script_path):
buildout_script_paths = set()

for buildout_script_path in _get_buildout_script_paths(script_path):
for path in _get_paths_from_buildout_script(evaluator, buildout_script_path):
for path in _get_paths_from_buildout_script(infer_state, buildout_script_path):
buildout_script_paths.add(path)

return buildout_script_paths


def _get_paths_from_buildout_script(evaluator, buildout_script_path):
def _get_paths_from_buildout_script(infer_state, buildout_script_path):
file_io = FileIO(buildout_script_path)
try:
module_node = evaluator.parse(
module_node = infer_state.parse(
file_io=file_io,
cache=True,
cache_path=settings.cache_directory
@@ -154,9 +154,9 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path):

from jedi.inference.context import ModuleContext
module = ModuleContext(
evaluator, module_node, file_io,
infer_state, module_node, file_io,
string_names=None,
code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path),
code_lines=get_cached_code_lines(infer_state.grammar, buildout_script_path),
)
for path in check_sys_path_modifications(module):
yield path

@@ -41,7 +41,7 @@ def usages(module_context, tree_name):
modules = set(m for m in modules if m.is_module() and not m.is_compiled())

non_matching_usage_maps = {}
for m in imports.get_modules_containing_name(module_context.evaluator, modules, search_name):
for m in imports.get_modules_containing_name(module_context.infer_state, modules, search_name):
for name_leaf in m.tree_node.get_used_names().get(search_name, []):
new = _find_names(m, name_leaf)
if any(tree_name in found_names for tree_name in new):

@@ -3,19 +3,19 @@ def import_module(callback):
Handle "magic" Flask extension imports:
``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
"""
def wrapper(evaluator, import_names, module_context, *args, **kwargs):
def wrapper(infer_state, import_names, module_context, *args, **kwargs):
if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
# New style.
ipath = (u'flask_' + import_names[2]),
context_set = callback(evaluator, ipath, None, *args, **kwargs)
context_set = callback(infer_state, ipath, None, *args, **kwargs)
if context_set:
return context_set
context_set = callback(evaluator, (u'flaskext',), None, *args, **kwargs)
context_set = callback(infer_state, (u'flaskext',), None, *args, **kwargs)
return callback(
evaluator,
infer_state,
(u'flaskext', import_names[2]),
next(iter(context_set)),
*args, **kwargs
)
return callback(evaluator, import_names, module_context, *args, **kwargs)
return callback(infer_state, import_names, module_context, *args, **kwargs)
return wrapper

@@ -114,7 +114,7 @@ def execute(callback):
except AttributeError:
pass
else:
if context.parent_context == context.evaluator.builtins_module:
if context.parent_context == context.infer_state.builtins_module:
module_name = 'builtins'
elif context.parent_context is not None and context.parent_context.is_module():
module_name = context.parent_context.py__name__()
@@ -148,7 +148,7 @@ def execute(callback):
return wrapper


def _follow_param(evaluator, arguments, index):
def _follow_param(infer_state, arguments, index):
try:
key, lazy_context = list(arguments.unpack())[index]
except IndexError:
@@ -158,7 +158,7 @@ def _follow_param(evaluator, arguments, index):


def argument_clinic(string, want_obj=False, want_context=False,
want_arguments=False, want_evaluator=False,
want_arguments=False, want_infer_state=False,
want_callback=False):
"""
Works like Argument Clinic (PEP 436), to validate function params.
@@ -177,8 +177,8 @@ def argument_clinic(string, want_obj=False, want_context=False,
kwargs['context'] = arguments.context
if want_obj:
kwargs['obj'] = obj
if want_evaluator:
kwargs['evaluator'] = obj.evaluator
if want_infer_state:
kwargs['infer_state'] = obj.infer_state
if want_arguments:
kwargs['arguments'] = arguments
if want_callback:
@@ -202,9 +202,9 @@ def builtins_property(objects, types, obj, arguments):
return lazy_context.infer().py__call__(arguments=ValuesArguments([objects]))


@argument_clinic('iterator[, default], /', want_evaluator=True)
def builtins_next(iterators, defaults, evaluator):
if evaluator.environment.version_info.major == 2:
@argument_clinic('iterator[, default], /', want_infer_state=True)
def builtins_next(iterators, defaults, infer_state):
if infer_state.environment.version_info.major == 2:
name = 'next'
else:
name = '__next__'
@@ -245,8 +245,8 @@ def builtins_type(objects, bases, dicts):

class SuperInstance(LazyContextWrapper):
"""To be used like the object ``super`` returns."""
def __init__(self, evaluator, instance):
self.evaluator = evaluator
def __init__(self, infer_state, instance):
self.infer_state = infer_state
self._instance = instance # Corresponds to super().__self__

def _get_bases(self):
@@ -274,7 +274,7 @@ def builtins_super(types, objects, context):
instance = context.var_args.instance
# TODO if a class is given it doesn't have to be the direct super
# class, it can be an anecestor from long ago.
return ContextSet({SuperInstance(instance.evaluator, instance)})
return ContextSet({SuperInstance(instance.infer_state, instance)})

return NO_CONTEXTS

@@ -312,12 +312,12 @@ def builtins_reversed(sequences, obj, arguments):
# necessary, because `reversed` is a function and autocompletion
# would fail in certain cases like `reversed(x).__iter__` if we
# just returned the result directly.
seq, = obj.evaluator.typing_module.py__getattribute__('Iterator').execute_with_values()
seq, = obj.infer_state.typing_module.py__getattribute__('Iterator').execute_with_values()
return ContextSet([ReversedObject(seq, list(reversed(ordered)))])


@argument_clinic('obj, type, /', want_arguments=True, want_evaluator=True)
def builtins_isinstance(objects, types, arguments, evaluator):
@argument_clinic('obj, type, /', want_arguments=True, want_infer_state=True)
def builtins_isinstance(objects, types, arguments, infer_state):
bool_results = set()
for o in objects:
cls = o.py__class__()
@@ -336,7 +336,7 @@ def builtins_isinstance(objects, types, arguments, evaluator):
if cls_or_tup.is_class():
bool_results.add(cls_or_tup in mro)
elif cls_or_tup.name.string_name == 'tuple' \
and cls_or_tup.get_root_context() == evaluator.builtins_module:
and cls_or_tup.get_root_context() == infer_state.builtins_module:
# Check for tuples.
classes = ContextSet.from_sets(
lazy_context.infer()
@@ -353,7 +353,7 @@ def builtins_isinstance(objects, types, arguments, evaluator):
analysis.add(lazy_context.context, 'type-error-isinstance', node, message)

return ContextSet(
compiled.builtin_from_name(evaluator, force_unicode(str(b)))
compiled.builtin_from_name(infer_state, force_unicode(str(b)))
for b in bool_results
)

@@ -430,18 +430,18 @@ def collections_namedtuple(obj, arguments, callback):
inferring the result.

"""
evaluator = obj.evaluator
infer_state = obj.infer_state

# Process arguments
name = u'jedi_unknown_namedtuple'
for c in _follow_param(evaluator, arguments, 0):
for c in _follow_param(infer_state, arguments, 0):
x = get_str_or_none(c)
if x is not None:
name = force_unicode(x)
break

# TODO here we only use one of the types, we should use all.
param_contexts = _follow_param(evaluator, arguments, 1)
param_contexts = _follow_param(infer_state, arguments, 1)
if not param_contexts:
return NO_CONTEXTS
_fields = list(param_contexts)[0]
@@ -470,16 +470,16 @@ def collections_namedtuple(obj, arguments, callback):
)

# Parse source code
module = evaluator.grammar.parse(code)
module = infer_state.grammar.parse(code)
generated_class = next(module.iter_classdefs())
parent_context = ModuleContext(
evaluator, module,
infer_state, module,
file_io=None,
string_names=None,
code_lines=parso.split_lines(code, keepends=True),
)

return ContextSet([ClassContext(evaluator, parent_context, generated_class)])
return ContextSet([ClassContext(infer_state, parent_context, generated_class)])


class PartialObject(object):
@@ -571,7 +571,7 @@ def _random_choice(sequences):


def _dataclass(obj, arguments, callback):
for c in _follow_param(obj.evaluator, arguments, 0):
for c in _follow_param(obj.infer_state, arguments, 0):
if c.is_class():
return ContextSet([DataclassWrapper(c)])
else:
@@ -645,7 +645,7 @@ class ItemGetterCallable(ContextWrapper):
context_set |= item_context_set.get_item(lazy_contexts[0].infer(), None)
else:
context_set |= ContextSet([iterable.FakeSequence(
self._wrapped_context.evaluator,
self._wrapped_context.infer_state,
'list',
[
LazyKnownContexts(item_context_set.get_item(lazy_context.infer(), None))
@@ -698,7 +698,7 @@ def _create_string_input_function(func):
s = get_str_or_none(context)
if s is not None:
s = func(s)
yield compiled.create_simple_object(context.evaluator, s)
yield compiled.create_simple_object(context.infer_state, s)
contexts = ContextSet(iterate())
if contexts:
return contexts
@@ -724,7 +724,7 @@ def _os_path_join(args_set, callback):
string += force_unicode(s)
is_first = False
else:
return ContextSet([compiled.create_simple_object(sequence.evaluator, string)])
return ContextSet([compiled.create_simple_object(sequence.infer_state, string)])
return callback()


@@ -793,7 +793,7 @@ def get_metaclass_filters(func):
for metaclass in metaclasses:
if metaclass.py__name__() == 'EnumMeta' \
and metaclass.get_root_context().py__name__() == 'enum':
filter_ = ParserTreeFilter(cls.evaluator, context=cls)
filter_ = ParserTreeFilter(cls.infer_state, context=cls)
return [DictFilter({
name.string_name: EnumInstance(cls, name).name for name in filter_.values()
})]
@@ -803,7 +803,7 @@ def get_metaclass_filters(func):

class EnumInstance(LazyContextWrapper):
def __init__(self, cls, name):
self.evaluator = cls.evaluator
self.infer_state = cls.infer_state
self._cls = cls # Corresponds to super().__self__
self._name = name
self.tree_node = self._name.tree_name
@@ -818,7 +818,7 @@ class EnumInstance(LazyContextWrapper):

def get_filters(self, search_global=False, position=None, origin_scope=None):
yield DictFilter(dict(
name=compiled.create_simple_object(self.evaluator, self._name.string_name).name,
name=compiled.create_simple_object(self.infer_state, self._name.string_name).name,
value=self._name,
))
for f in self._get_wrapped_context().get_filters():
@@ -826,10 +826,10 @@ class EnumInstance(LazyContextWrapper):


def tree_name_to_contexts(func):
def wrapper(evaluator, context, tree_name):
def wrapper(infer_state, context, tree_name):
if tree_name.value == 'sep' and context.is_module() and context.py__name__() == 'os.path':
return ContextSet({
compiled.create_simple_object(evaluator, os.path.sep),
compiled.create_simple_object(infer_state, os.path.sep),
})
return func(evaluator, context, tree_name)
return func(infer_state, context, tree_name)
return wrapper

@@ -13,8 +13,8 @@ sys.path.append('a' +* '/thirdparty')
#? ['inference']
import inference

#? ['evaluator_function_cache']
inference.Evaluator_fu
#? ['infer_state_function_cache']
inference.infer_state_fu

# Those don't work because dirname and abspath are not properly understood.
##? ['jedi_']

@@ -162,10 +162,10 @@ def cwd_tmpdir(monkeypatch, tmpdir):


@pytest.fixture
def evaluator(Script):
return Script('')._evaluator
def infer_state(Script):
return Script('')._infer_state


@pytest.fixture
def same_process_evaluator(Script):
return Script('', environment=InterpreterEnvironment())._evaluator
def same_process_infer_state(Script):
return Script('', environment=InterpreterEnvironment())._infer_state

@@ -212,7 +212,7 @@ class IntegrationTestCase(object):

def run_goto_definitions(self, compare_cb, environment):
script = self.script(environment)
evaluator = script._evaluator
infer_state = script._infer_state

def comparison(definition):
suffix = '()' if definition.type == 'instance' else ''
@@ -232,13 +232,13 @@ class IntegrationTestCase(object):
user_context = user_context.get_function_execution()
element.parent = user_context.tree_node
results = convert_contexts(
evaluator.infer_element(user_context, element),
infer_state.infer_element(user_context, element),
)
if not results:
raise Exception('Could not resolve %s on line %s'
% (match.string, self.line_nr - 1))

should_be |= set(Definition(evaluator, r.name) for r in results)
should_be |= set(Definition(infer_state, r.name) for r in results)
debug.dbg('Finished getting types', color='YELLOW')

# Because the objects have different ids, `repr`, then compare.

@@ -42,10 +42,10 @@ def test_versions(version):
assert env.get_sys_path()


def test_load_module(evaluator):
access_path = evaluator.compiled_subprocess.load_module(
def test_load_module(infer_state):
access_path = infer_state.compiled_subprocess.load_module(
dotted_name=u'math',
sys_path=evaluator.get_sys_path()
sys_path=infer_state.get_sys_path()
)
name, access_handle = access_path.accesses[0]

@@ -55,31 +55,31 @@ def test_load_module(evaluator):
access_handle.py__mro__()


def test_error_in_environment(evaluator, Script, environment):
def test_error_in_environment(infer_state, Script, environment):
if isinstance(environment, InterpreterEnvironment):
pytest.skip("We don't catch these errors at the moment.")

# Provoke an error to show how Jedi can recover from it.
with pytest.raises(jedi.InternalError):
evaluator.compiled_subprocess._test_raise_error(KeyboardInterrupt)
infer_state.compiled_subprocess._test_raise_error(KeyboardInterrupt)
# The second time it should raise an InternalError again.
with pytest.raises(jedi.InternalError):
evaluator.compiled_subprocess._test_raise_error(KeyboardInterrupt)
infer_state.compiled_subprocess._test_raise_error(KeyboardInterrupt)
# Jedi should still work.
def_, = Script('str').goto_definitions()
assert def_.name == 'str'


def test_stdout_in_subprocess(evaluator, Script):
evaluator.compiled_subprocess._test_print(stdout='.')
def test_stdout_in_subprocess(infer_state, Script):
infer_state.compiled_subprocess._test_print(stdout='.')
Script('1').goto_definitions()


def test_killed_subprocess(evaluator, Script, environment):
def test_killed_subprocess(infer_state, Script, environment):
if isinstance(environment, InterpreterEnvironment):
pytest.skip("We cannot kill our own process")
# Just kill the subprocess.
evaluator.compiled_subprocess._compiled_subprocess._get_process().kill()
infer_state.compiled_subprocess._compiled_subprocess._get_process().kill()
# Since the process was terminated (and nobody knows about it) the first
# Jedi call fails.
with pytest.raises(jedi.InternalError):

@@ -13,12 +13,12 @@ def test_django_default_project(Script):
)
c, = script.completions()
assert c.name == "SomeModel"
assert script._evaluator.project._django is True
assert script._infer_state.project._django is True


def test_interpreter_project_path():
# Run from anywhere it should be the cwd.
dir = os.path.join(root_dir, 'test')
with set_cwd(dir):
project = Interpreter('', [locals()])._evaluator.project
project = Interpreter('', [locals()])._infer_state.project
assert project._path == dir

@@ -17,7 +17,7 @@ def test_add_dynamic_mods(Script):
# Other fictional modules in another place in the fs.
src2 = 'from .. import setup; setup.r(1)'
script = Script(src1, path='../setup.py')
imports.load_module(script._evaluator, os.path.abspath(fname), src2)
imports.load_module(script._infer_state, os.path.abspath(fname), src2)
result = script.goto_definitions()
assert len(result) == 1
assert result[0].description == 'class int'

@@ -63,7 +63,7 @@ def test_sys_path_with_modifications(Script):
""")

path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
paths = Script(code, path=path)._evaluator.get_sys_path()
paths = Script(code, path=path)._infer_state.get_sys_path()
assert '/tmp/.buildout/eggs/important_package.egg' in paths



@@ -11,8 +11,8 @@ from jedi.inference.compiled.access import DirectObjectAccess
from jedi.inference.gradual.conversion import _stub_to_python_context_set


def test_simple(evaluator, environment):
obj = compiled.create_simple_object(evaluator, u'_str_')
def test_simple(infer_state, environment):
obj = compiled.create_simple_object(infer_state, u'_str_')
upper, = obj.py__getattribute__(u'upper')
objs = list(upper.execute_with_values())
assert len(objs) == 1
@@ -23,15 +23,15 @@ def test_simple(evaluator, environment):
assert objs[0].name.string_name == expected


def test_builtin_loading(evaluator):
string, = evaluator.builtins_module.py__getattribute__(u'str')
def test_builtin_loading(infer_state):
string, = infer_state.builtins_module.py__getattribute__(u'str')
from_name, = string.py__getattribute__(u'__init__')
assert from_name.tree_node
assert not from_name.py__doc__() # It's a stub


def test_next_docstr(evaluator):
next_ = compiled.builtin_from_name(evaluator, u'next')
def test_next_docstr(infer_state):
next_ = compiled.builtin_from_name(infer_state, u'next')
assert next_.tree_node is not None
assert next_.py__doc__() == '' # It's a stub
for non_stub in _stub_to_python_context_set(next_):
@@ -47,12 +47,12 @@ def test_parse_function_doc_illegal_docstr():
assert ('', '') == compiled.context._parse_function_doc(docstr)


def test_doc(evaluator):
def test_doc(infer_state):
"""
Even CompiledObject docs always return empty docstrings - not None, that's
just a Jedi API definition.
"""
str_ = compiled.create_simple_object(evaluator, u'')
str_ = compiled.create_simple_object(infer_state, u'')
# Equals `''.__getnewargs__`
obj, = str_.py__getattribute__(u'__getnewargs__')
assert obj.py__doc__() == ''
@@ -103,7 +103,7 @@ def test_dict_values(Script, environment):
def test_getitem_on_none(Script):
script = Script('None[1j]')
assert not script.goto_definitions()
issue, = script._evaluator.analysis
issue, = script._infer_state.analysis
assert issue.name == 'type-error-not-subscriptable'


@@ -122,7 +122,7 @@ def _return_int():
('ret_int', '_return_int', 'test.test_inference.test_compiled'),
]
)
def test_parent_context(same_process_evaluator, attribute, expected_name, expected_parent):
def test_parent_context(same_process_infer_state, attribute, expected_name, expected_parent):
import decimal

class C:
@@ -135,8 +135,8 @@ def test_parent_context(same_process_evaluator, attribute, expected_name, expect
ret_int = _return_int

o = compiled.CompiledObject(
same_process_evaluator,
DirectObjectAccess(same_process_evaluator, C)
same_process_infer_state,
DirectObjectAccess(same_process_infer_state, C)
)
x, = o.py__getattribute__(attribute)
assert x.py__name__() == expected_name
@@ -163,9 +163,9 @@ def test_parent_context(same_process_evaluator, attribute, expected_name, expect
(Counter("").most_common, ['Counter', 'most_common']),
]
)
def test_qualified_names(same_process_evaluator, obj, expected_names):
def test_qualified_names(same_process_infer_state, obj, expected_names):
o = compiled.CompiledObject(
same_process_evaluator,
DirectObjectAccess(same_process_evaluator, obj)
same_process_infer_state,
DirectObjectAccess(same_process_infer_state, obj)
)
assert o.get_qualified_names() == tuple(expected_names)

@@ -66,7 +66,7 @@ def test_instance_doc(Script):
assert defs[0].docstring() == 'Docstring of `TestClass`.'


@unittest.skip('need evaluator class for that')
@unittest.skip('need infer_state class for that')
def test_attribute_docstring(Script):
defs = Script("""
x = None
@@ -75,7 +75,7 @@ def test_attribute_docstring(Script):
assert defs[0].docstring() == 'Docstring of `x`.'


@unittest.skip('need evaluator class for that')
@unittest.skip('need infer_state class for that')
def test_multiple_docstrings(Script):
defs = Script("""
def func():

@@ -43,12 +43,12 @@ pkg_zip_path = os.path.join(os.path.dirname(__file__),
'pkg.zip')


def test_find_module_package_zipped(Script, evaluator, environment):
def test_find_module_package_zipped(Script, infer_state, environment):
sys_path = environment.get_sys_path() + [pkg_zip_path]
script = Script('import pkg; pkg.mod', sys_path=sys_path)
assert len(script.completions()) == 1

file_io, is_package = evaluator.compiled_subprocess.get_module_info(
file_io, is_package = infer_state.compiled_subprocess.get_module_info(
sys_path=sys_path,
string=u'pkg',
full_name=u'pkg'
@@ -84,7 +84,7 @@ def test_find_module_package_zipped(Script, evaluator, environment):
]

)
def test_correct_zip_package_behavior(Script, evaluator, environment, code,
def test_correct_zip_package_behavior(Script, infer_state, environment, code,
file, package, path, skip_python2):
sys_path = environment.get_sys_path() + [pkg_zip_path]
pkg, = Script(code, sys_path=sys_path).goto_definitions()
@@ -96,13 +96,13 @@ def test_correct_zip_package_behavior(Script, evaluator, environment, code,
assert context.py__path__() == [os.path.join(pkg_zip_path, path)]


def test_find_module_not_package_zipped(Script, evaluator, environment):
def test_find_module_not_package_zipped(Script, infer_state, environment):
path = os.path.join(os.path.dirname(__file__), 'zipped_imports/not_pkg.zip')
sys_path = environment.get_sys_path() + [path]
script = Script('import not_pkg; not_pkg.val', sys_path=sys_path)
assert len(script.completions()) == 1

file_io, is_package = evaluator.compiled_subprocess.get_module_info(
file_io, is_package = infer_state.compiled_subprocess.get_module_info(
sys_path=sys_path,
string=u'not_pkg',
full_name=u'not_pkg'
@@ -310,16 +310,16 @@ def test_compiled_import_none(monkeypatch, Script):
(os.path.join(THIS_DIR, '__init__.py'), True, ('ok', 'lala', 'x', 'test_imports')),
]
)
def test_get_modules_containing_name(evaluator, path, goal, is_package):
def test_get_modules_containing_name(infer_state, path, goal, is_package):
module = imports._load_python_module(
evaluator,
infer_state,
FileIO(path),
import_names=('ok', 'lala', 'x'),
is_package=is_package,
)
assert module
input_module, found_module = imports.get_modules_containing_name(
evaluator,
infer_state,
[module],
'string_that_only_exists_here'
)
@@ -337,9 +337,9 @@ def test_get_modules_containing_name(evaluator, path, goal, is_package):
('/foo/bar/__init__.py', ('foo', 'bar'), True, ('foo', 'bar')),
]
)
def test_load_module_from_path(evaluator, path, base_names, is_package, names):
def test_load_module_from_path(infer_state, path, base_names, is_package, names):
file_io = KnownContentFileIO(path, '')
m = imports._load_module_from_path(evaluator, file_io, base_names)
m = imports._load_module_from_path(infer_state, file_io, base_names)
assert m.is_package == is_package
assert m.string_names == names

@@ -437,8 +437,8 @@ def test_pre_defined_imports_module(Script, environment, name):
module = Script('', path=path)._get_module()
assert module.string_names == (name,)

assert module.evaluator.builtins_module.py__file__() != path
assert module.evaluator.typing_module.py__file__() != path
assert module.infer_state.builtins_module.py__file__() != path
assert module.infer_state.typing_module.py__file__() != path


@pytest.mark.parametrize('name', ('builtins', 'typing'))
@@ -454,8 +454,8 @@ def test_import_needed_modules_by_jedi(Script, environment, tmpdir, name):
sys_path=[tmpdir.strpath] + environment.get_sys_path(),
)
module, = script.goto_definitions()
assert module._evaluator.builtins_module.py__file__() != module_path
assert module._evaluator.typing_module.py__file__() != module_path
assert module._infer_state.builtins_module.py__file__() != module_path
assert module._infer_state.typing_module.py__file__() != module_path


def test_import_with_semicolon(Script):

@@ -1,9 +1,9 @@
from textwrap import dedent


def get_definition_and_evaluator(Script, source):
def get_definition_and_infer_state(Script, source):
first, = Script(dedent(source)).goto_definitions()
return first._name._context, first._evaluator
return first._name._context, first._infer_state


def test_function_execution(Script):
@@ -16,7 +16,7 @@ def test_function_execution(Script):
def x():
return str()
x"""
func, evaluator = get_definition_and_evaluator(Script, s)
func, infer_state = get_definition_and_infer_state(Script, s)
# Now just use the internals of the result (easiest way to get a fully
# usable function).
# Should return the same result both times.
@@ -29,6 +29,6 @@ def test_class_mro(Script):
class X(object):
pass
X"""
cls, evaluator = get_definition_and_evaluator(Script, s)
cls, infer_state = get_definition_and_infer_state(Script, s)
mro = cls.py__mro__()
assert [c.name.string_name for c in mro] == ['X', 'object']