forked from VimPlug/jedi
Some analysis improvements.
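The change that repeats through the diff below is that the analysis and operator helpers now take a context (which carries its evaluator) instead of a bare evaluator: compare `analysis.add(context, ...)`, `context.evaluator.analysis.append(...)` and `precedence.calculate(self, context, ...)` in the hunks that follow. A minimal hedged sketch of that calling convention, using invented stand-in classes rather than jedi's real ones:

# Hedged sketch of the refactor pattern, not code from the commit.
# DummyEvaluator/DummyContext are made-up stand-ins for illustration only.

class DummyEvaluator(object):
    def __init__(self):
        self.analysis = []              # collected Error/Warning instances

class DummyContext(object):
    def __init__(self, evaluator):
        self.evaluator = evaluator      # the evaluator stays reachable via the context

def add(context, name, jedi_name, message=None):
    # mirrors the new-style signature: the first argument is a context,
    # and the evaluator is reached through context.evaluator
    context.evaluator.analysis.append((name, message))

evaluator = DummyEvaluator()
context = DummyContext(evaluator)
add(context, 'name-error', None, "NameError: name 'x' is not defined.")
print(evaluator.analysis)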
@@ -13,7 +13,6 @@ import os
 import warnings
 import sys

-from jedi._compatibility import unicode
 from jedi.parser import load_grammar
 from jedi.parser import tree
 from jedi.parser.fast import FastParser
@@ -195,7 +194,7 @@ class Script(object):
 if leaf is None:
 return []

-context = self._evaluator.create_context(self._get_module(), leaf.parent)
+context = self._evaluator.create_context(self._get_module(), leaf)
 definitions = helpers.evaluate_goto_definition(self._evaluator, context, leaf)

 names = [s.name for s in definitions]
@@ -328,11 +327,12 @@ class Script(object):
 self._evaluator.analysis_modules = [module_node]
 try:
 for node in module_node.nodes_to_execute():
+context = self._get_module().create_context(node)
 if node.type in ('funcdef', 'classdef'):
-if node.type == 'classdef':
-continue
-raise NotImplementedError
-er.Function(self._evaluator, node).get_decorated_func()
+# TODO This is stupid, should be private
+from jedi.evaluate.finder import _name_to_types
+# Resolve the decorators.
+_name_to_types(self._evaluator, context, node.children[1])
 elif isinstance(node, tree.Import):
 import_names = set(node.get_defined_names())
 if node.is_nested():
@@ -340,12 +340,12 @@ class Script(object):
 for n in import_names:
 imports.ImportWrapper(context, n).follow()
 elif node.type == 'expr_stmt':
-types = self._evaluator.eval_element(node)
+types = context.eval_node(node)
 for testlist in node.children[:-1:2]:
 # Iterate tuples.
 unpack_tuple_to_dict(self._evaluator, types, testlist)
 else:
-try_iter_content(self._evaluator.goto_definitions(node))
+try_iter_content(self._evaluator.goto_definitions(context, node))
 self._evaluator.reset_recursion_limitations()

 ana = [a for a in self._evaluator.analysis if self.path == a.path]
@@ -133,7 +133,7 @@ class BaseDefinition(object):
 >>> defs = sorted(defs, key=lambda d: d.line)
 >>> defs # doctest: +NORMALIZE_WHITESPACE
 [<Definition module keyword>, <Definition class C>,
-<Definition class D>, <Definition def f>]
+<Definition instance D>, <Definition def f>]

 Finally, here is what you can get from :attr:`type`:

@@ -489,7 +489,7 @@ class Completion(BaseDefinition):
 return '%s: %s%s' % (t, desc, line)

 def __repr__(self):
-return '<%s: %s>' % (type(self).__name__, self._name)
+return '<%s: %s>' % (type(self).__name__, self._name.string_name)

 @memoize_method
 def _follow_statements_imports(self):
@@ -556,7 +556,7 @@ class Definition(BaseDefinition):
 """
 typ = self.type
 tree_name = self._name.tree_name
-if typ in ('function', 'class', 'module') or tree_name is None:
+if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
 if typ == 'function':
 # For the description we want a short and a pythonic way.
 typ = 'def'
@@ -743,8 +743,8 @@ class CallSignature(Definition):
 return self._executable.get_parent_until()

 def __repr__(self):
-return '<%s: %s index %s>' % (type(self).__name__, self._name,
-self.index)
+return '<%s: %s index %s>' % \
+(type(self).__name__, self._name.string_name, self.index)


 class _Param(Definition):
@@ -9,19 +9,31 @@ def usages(evaluator, definition_names, mods):
 """
 :param definitions: list of Name
 """
+def resolve_names(definition_names):
+for name in definition_names:
+if name.api_type == 'module':
+found = False
+for context in name.infer():
+found = True
+yield context.name
+if not found:
+yield name
+else:
+yield name
+
 def compare_array(definition_names):
 """ `definitions` are being compared by module/start_pos, because
 sometimes the id's of the objects change (e.g. executions).
 """
 return [
-(d.get_root_context(), d.start_pos)
-for d in definition_names
+(name.get_root_context(), name.start_pos)
+for name in resolve_names(definition_names)
 ]

 search_name = list(definition_names)[0].string_name
 compare_definitions = compare_array(definition_names)
 mods = mods | set([d.get_root_context() for d in definition_names])
-definition_names = set(definition_names)
+definition_names = set(resolve_names(definition_names))
 for m in imports.get_modules_containing_name(evaluator, mods, search_name):
 if isinstance(m, ModuleContext):
 for name_node in m.module_node.used_names.get(search_name, []):
@@ -169,10 +169,10 @@ class Evaluator(object):
 dct = {str(for_stmt.children[1]): lazy_context.infer()}
 with helpers.predefine_names(context, for_stmt, dct):
 t = self.eval_element(context, rhs)
-left = precedence.calculate(self, left, operator, t)
+left = precedence.calculate(self, context, left, operator, t)
 types = left
 else:
-types = precedence.calculate(self, left, operator, types)
+types = precedence.calculate(self, context, left, operator, types)
 debug.dbg('eval_statement result %s', types)
 return types

@@ -180,9 +180,16 @@ class Evaluator(object):
 if isinstance(context, iterable.CompForContext):
 return self._eval_element_not_cached(context, element)

-if_stmt = element.get_parent_until((tree.IfStmt, tree.ForStmt, tree.IsScope))
+if_stmt = element
+while if_stmt is not None:
+if_stmt = if_stmt.parent
+if if_stmt.type in ('if_stmt', 'for_stmt'):
+break
+if if_stmt.is_scope():
+if_stmt = None
+break
 predefined_if_name_dict = context.predefined_names.get(if_stmt)
-if predefined_if_name_dict is None and isinstance(if_stmt, tree.IfStmt):
+if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt':
 if_stmt_test = if_stmt.children[1]
 name_dicts = [{}]
 # If we already did a check, we don't want to do it again -> If
@@ -274,7 +281,7 @@ class Evaluator(object):
 for trailer in element.children[1:]:
 if trailer == '**': # has a power operation.
 right = self.eval_element(context, element.children[2])
-types = set(precedence.calculate(self, types, trailer, right))
+types = set(precedence.calculate(self, context, types, trailer, right))
 break
 types = self.eval_trailer(context, types, trailer)
 elif element.type in ('testlist_star_expr', 'testlist',):
@@ -344,7 +351,7 @@ class Evaluator(object):
 types = self._eval_atom(context, c[0])
 for string in c[1:]:
 right = self._eval_atom(context, string)
-types = precedence.calculate(self, types, '+', right)
+types = precedence.calculate(self, context, types, '+', right)
 return types
 # Parentheses without commas are not tuples.
 elif c[0] == '(' and not len(c) == 2 \
@@ -491,11 +498,6 @@ class Evaluator(object):
 # a name it's something you can "goto" again.
 return [TreeNameDefinition(context, name)]
 elif isinstance(par, (tree.Param, tree.Function, tree.Class)) and par.name is name:
-if par.type in ('funcdef', 'classdef', 'module'):
-if par.type == 'funcdef':
-return [context.function_context.name]
-else:
-return [context.name]
 return [TreeNameDefinition(context, name)]
 elif isinstance(stmt, tree.Import):
 module_names = imports.ImportWrapper(context, name).follow(is_goto=True)
@@ -600,5 +602,9 @@ class Evaluator(object):
 if node_is_context and node.is_scope():
 scope_node = node
 else:
+if node.parent.type in ('funcdef', 'classdef'):
+# When we're on class/function names/leafs that define the
+# object itself and not its contents.
+node = node.parent
 scope_node = parent_scope(node)
 return from_scope_node(scope_node)
@@ -58,8 +58,8 @@ class Error(object):
 return self.__unicode__()

 def __eq__(self, other):
-return (self.path == other.path and self.name == other.name
-and self._start_pos == other._start_pos)
+return (self.path == other.path and self.name == other.name and
+self._start_pos == other._start_pos)

 def __ne__(self, other):
 return not self.__eq__(other)
@@ -77,23 +77,19 @@ class Warning(Error):
 pass


-def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
+def add(context, name, jedi_name, message=None, typ=Error, payload=None):
 return
-from jedi.evaluate.iterable import MergedNodes
-while isinstance(jedi_obj, MergedNodes):
-if len(jedi_obj) != 1:
-# TODO is this kosher?
-return
-jedi_obj = list(jedi_obj)[0]
+from jedi.evaluate import Evaluator
+if isinstance(context, Evaluator):
+raise 1

 exception = CODES[name][1]
-if _check_for_exception_catch(evaluator, jedi_obj, exception, payload):
+if _check_for_exception_catch(context, jedi_name, exception, payload):
 return

-module_path = jedi_obj.get_parent_until().path
-instance = typ(name, module_path, jedi_obj.start_pos, message)
+module_path = jedi_name.get_root_node().path
+instance = typ(name, module_path, jedi_name.start_pos, message)
 debug.warning(str(instance), format=False)
-evaluator.analysis.append(instance)
+context.evaluator.analysis.append(instance)


 def _check_for_setattr(instance):
@@ -114,25 +110,29 @@ def _check_for_setattr(instance):
 for stmt in stmts)


-def add_attribute_error(evaluator, scope, name):
-message = ('AttributeError: %s has no attribute %s.' % (scope, name))
-from jedi.evaluate.instance import AbstractInstanceContext
+def add_attribute_error(context, name):
+message = ('AttributeError: %s has no attribute %s.' % (context, name))
+from jedi.evaluate.instance import AbstractInstanceContext, CompiledInstanceName
 # Check for __getattr__/__getattribute__ existance and issue a warning
 # instead of an error, if that happens.
-if isinstance(scope, AbstractInstanceContext):
-typ = Warning
-if not (scope.get_function_slot_names('__getattr__') or
-scope.get_function_slot_names('__getattribute__')):
-if not _check_for_setattr(scope):
-typ = Error
-else:
-typ = Error
+typ = Error
+if isinstance(context, AbstractInstanceContext):
+slot_names = context.get_function_slot_names('__getattr__') + \
+context.get_function_slot_names('__getattribute__')
+for n in slot_names:
+if isinstance(name, CompiledInstanceName) and \
+n.parent_context.obj == object:
+typ = Warning
+break

-payload = scope, name
-add(evaluator, 'attribute-error', name, message, typ, payload)
+if _check_for_setattr(context):
+typ = Warning
+
+payload = context, name
+add(context, 'attribute-error', name, message, typ, payload)


-def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
+def _check_for_exception_catch(context, jedi_name, exception, payload=None):
 """
 Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
 doesn't count as an error (if equal to `exception`).
@@ -153,17 +153,18 @@ def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
 colon = next(iterator)
 suite = next(iterator)
 if branch_type == 'try' \
-and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
+and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
 return False

 for node in obj.except_clauses():
 if node is None:
 return True # An exception block that catches everything.
 else:
-except_classes = evaluator.eval_element(node)
+except_classes = context.eval_node(node)
 for cls in except_classes:
 from jedi.evaluate import iterable
-if isinstance(cls, iterable.Array) and cls.type == 'tuple':
+if isinstance(cls, iterable.AbstractSequence) and \
+cls.array_type == 'tuple':
 # multiple exceptions
 for typ in unite(cls.py__iter__()):
 if check_match(typ, exception):
@@ -174,7 +175,7 @@ def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):

 def check_hasattr(node, suite):
 try:
-assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos
+assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
 assert node.type in ('power', 'atom_expr')
 base = node.children[0]
 assert base.type == 'name' and base.value == 'hasattr'
@@ -183,28 +184,28 @@ def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
 arglist = trailer.children[1]
 assert arglist.type == 'arglist'
 from jedi.evaluate.param import Arguments
-args = list(Arguments(evaluator, arglist).unpack())
+args = list(Arguments(context, arglist).unpack())
 # Arguments should be very simple
 assert len(args) == 2

 # Check name
 key, values = args[1]
 assert len(values) == 1
-names = list(evaluator.eval_element(values[0]))
+names = list(context.eval_node(values[0]))
 assert len(names) == 1 and isinstance(names[0], CompiledObject)
 assert names[0].obj == str(payload[1])

 # Check objects
 key, values = args[0]
 assert len(values) == 1
-objects = evaluator.eval_element(values[0])
+objects = context.eval_node(values[0])
 return payload[0] in objects
 except AssertionError:
 return False

-obj = jedi_obj
-while obj is not None and not obj.isinstance(tree.Function, tree.Class):
-if obj.isinstance(tree.Flow):
+obj = jedi_name
+while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
+if isinstance(obj, tree.Flow):
 # try/except catch check
 if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception):
 return True
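`_check_for_exception_catch` above (now taking a context and a name) suppresses a report when the offending node is wrapped in a matching try/except or in a `hasattr()` guard, per its docstring and the `check_hasattr` helper. A hedged illustration, not part of the commit, of the source patterns it is meant to recognise:

# Illustration only: code like this should not be reported by the analysis,
# because the AttributeError is either caught or guarded by hasattr().

class Foo(object):
    pass

foo = Foo()

try:
    foo.missing            # caught below, so no 'attribute-error' is added
except AttributeError:
    pass

if hasattr(foo, 'missing'):
    foo.missing            # the hasattr() guard is what check_hasattr looks for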
@@ -14,9 +14,7 @@ would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
 Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
 check for -> a is a string). There's big potential in these checks.
 """
-from itertools import chain

-from jedi._compatibility import unicode
 from jedi.parser import tree
 from jedi import debug
 from jedi.common import unite
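The module docstring above says names are narrowed via flow checks of the form ``if isinstance(a, type_or_tuple)``. A hedged illustration, not part of the commit, of the kind of code that benefits:

# Illustration only: inside the isinstance() branch the flow check lets jedi
# treat `a` as a str, so str attributes can be completed and analysed there.

def f(a):
    if isinstance(a, str):
        return a.upper()    # `a` is narrowed to str in this branch
    return a

print(f("hello"), f(42))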
@@ -33,9 +31,7 @@ from jedi.evaluate import analysis
 from jedi.evaluate import flow_analysis
 from jedi.evaluate import param
 from jedi.evaluate import helpers
-from jedi.evaluate.context import TreeContext
-from jedi.evaluate.cache import memoize_default
-from jedi.evaluate.filters import get_global_filters, ContextName
+from jedi.evaluate.filters import get_global_filters, TreeNameDefinition


 def filter_after_position(names, position, origin=None):
@@ -128,22 +124,22 @@ class NameFinder(object):
 isinstance(self._name.parent.parent, tree.Param)):
 if isinstance(self._name, tree.Name):
 if attribute_lookup:
-analysis.add_attribute_error(self._evaluator,
-self._context, self._name)
+analysis.add_attribute_error(self._context, self._name)
 else:
 message = ("NameError: name '%s' is not defined."
 % self._string_name)
-analysis.add(self._evaluator, 'name-error', self._name,
-message)
+analysis.add(self._context, 'name-error', self._name, message)

 return types

-def get_filters(self, search_global=False):
+def _get_origin_scope(self):
 if isinstance(self._name, tree.Name):
-origin_scope = self._name.get_parent_until(tree.Scope, reverse=True)
+return self._name.get_parent_until(tree.Scope, reverse=True)
 else:
-origin_scope = None
+return None

+def get_filters(self, search_global=False):
+origin_scope = self._get_origin_scope()
 if search_global:
 return get_global_filters(self._evaluator, self._context, self._position, origin_scope)
 else:
@@ -321,15 +317,17 @@ class NameFinder(object):
 # Add isinstance and other if/assert knowledge.
 if not types and isinstance(self._name, tree.Name) and \
 not isinstance(self._name_context, AbstractInstanceContext):
-# Ignore FunctionExecution parents for now.
 flow_scope = self._name
+base_node = self._name_context.get_node()
+if base_node.type == 'comp_for':
+return types
 while True:
 flow_scope = flow_scope.get_parent_scope(include_flows=True)
 n = _check_flow_information(self._name_context, flow_scope,
 self._name, self._position)
 if n is not None:
 return n
-if flow_scope == self._name_context.get_node():
+if flow_scope == base_node:
 break
 return types
@@ -1,6 +1,3 @@
-from jedi.parser import tree
-
-
 class Status(object):
 lookup_table = {}
@@ -147,10 +147,10 @@ class NestedImportModule(tree.Module):
 self._nested_import)


-def _add_error(evaluator, name, message=None):
+def _add_error(context, name, message=None):
+# Should be a name, not a string!
 if hasattr(name, 'parent'):
-# Should be a name, not a string!
-analysis.add(evaluator, 'import-error', name, message)
+analysis.add(context, 'import-error', name, message)


 def get_init_path(directory_path):
@@ -168,25 +168,21 @@ def get_init_path(directory_path):
 class ImportName(AbstractNameDefinition):
 start_pos = (1, 0)

-def __init__(self, parent_module, string_name):
-self.parent_module = parent_module
+def __init__(self, parent_context, string_name):
+self.parent_context = parent_context
 self.string_name = string_name

 def infer(self):
 return Importer(
-self.parent_module.evaluator,
+self.parent_context.evaluator,
 [self.string_name],
-self.parent_module,
+self.parent_context,
 ).follow()

 def get_root_context(self):
 # Not sure if this is correct.
 return self.parent_context.get_root_context()

-@property
-def parent_context(self):
-return self.parent_module
-
 @property
 def api_type(self):
 return 'module'
@@ -195,18 +191,12 @@ class ImportName(AbstractNameDefinition):
 class SubModuleName(ImportName):
 def infer(self):
 return Importer(
-self.parent_module.evaluator,
+self.parent_context.evaluator,
 [self.string_name],
-self.parent_module,
+self.parent_context,
 level=1
 ).follow()

-@property
-def parent_context(self):
-# This is a bit of a special case. But it seems like it's working well.
-# Since a SubModuleName is basically a lazy name to a module
-return next(iter(self.infer()))
-

 class Importer(object):
 def __init__(self, evaluator, import_path, module_context, level=0):
@@ -249,7 +239,7 @@ class Importer(object):
 if dir_name:
 import_path.insert(0, dir_name)
 else:
-_add_error(self._evaluator, import_path[-1])
+_add_error(module_context, import_path[-1])
 import_path = []
 # TODO add import error.
 debug.warning('Attempted relative import beyond top-level package.')
@@ -336,7 +326,7 @@ class Importer(object):
 method = parent_module.py__path__
 except AttributeError:
 # The module is not a package.
-_add_error(self._evaluator, import_path[-1])
+_add_error(parent_module, import_path[-1])
 return set()
 else:
 paths = method()
@@ -351,7 +341,7 @@ class Importer(object):
 except ImportError:
 module_path = None
 if module_path is None:
-_add_error(self._evaluator, import_path[-1])
+_add_error(parent_module, import_path[-1])
 return set()
 else:
 parent_module = None
@@ -367,7 +357,7 @@ class Importer(object):
 sys.path = temp
 except ImportError:
 # The module is not a package.
-_add_error(self._evaluator, import_path[-1])
+_add_error(parent_module, import_path[-1])
 return set()

 source = None
@@ -63,11 +63,9 @@ class AbstractArguments():
 Evaluates all arguments as a support for static analysis
 (normally Jedi).
 """
-raise DeprecationWarning
-for key, element_values in self.unpack():
-for element in element_values:
-types = self._evaluator.eval_element(self.context, element)
-try_iter_content(types)
+for key, lazy_context in self.unpack():
+types = lazy_context.infer()
+try_iter_content(types)


 class TreeArguments(AbstractArguments):
@@ -260,7 +258,7 @@ def get_params(evaluator, parent_context, func, var_args):
 % (func.name, key))
 calling_va = _get_calling_var_args(evaluator, var_args)
 if calling_va is not None:
-analysis.add(evaluator, 'type-error-multiple-values',
+analysis.add(parent_context, 'type-error-multiple-values',
 calling_va, message=m)
 else:
 keys_used[key] = ExecutedParam(parent_context, key_param, var_args, argument)
@@ -302,7 +300,7 @@ def get_params(evaluator, parent_context, func, var_args):
 calling_va = var_args.get_calling_var_args()
 if calling_va is not None:
 m = _error_argument_count(func, len(unpacked_va))
-analysis.add(evaluator, 'type-error-too-few-arguments',
+analysis.add(parent_context, 'type-error-too-few-arguments',
 calling_va, message=m)
 else:
 result_arg = argument
@@ -323,13 +321,13 @@ def get_params(evaluator, parent_context, func, var_args):
 calling_va = _get_calling_var_args(evaluator, var_args)
 if calling_va is not None:
 m = _error_argument_count(func, len(unpacked_va))
-analysis.add(evaluator, 'type-error-too-few-arguments',
+analysis.add(parent_context, 'type-error-too-few-arguments',
 calling_va, message=m)

 for key, argument in non_matching_keys.items():
 m = "TypeError: %s() got an unexpected keyword argument '%s'." \
 % (func.name, key)
-analysis.add(evaluator, 'type-error-keyword-argument', argument.whatever, message=m)
+analysis.add(parent_context, 'type-error-keyword-argument', argument.whatever, message=m)

 remaining_arguments = list(var_arg_iterator)
 if remaining_arguments:
@@ -354,7 +352,7 @@ def get_params(evaluator, parent_context, func, var_args):
 # print('\t\tnonkw', non_kw_param.parent.var_args.argument_node, )
 if origin_args not in [f.parent.parent for f in first_values]:
 continue
-analysis.add(evaluator, 'type-error-too-many-arguments',
+analysis.add(parent_context, 'type-error-too-many-arguments',
 v, message=m)
 return result_params
@@ -59,13 +59,13 @@ def calculate_children(evaluator, context, children):
 types = context.eval_node(right)
 # Otherwise continue, because of uncertainty.
 else:
-types = calculate(evaluator, types, operator,
+types = calculate(evaluator, context, types, operator,
 context.eval_node(right))
 debug.dbg('calculate_children types %s', types)
 return types


-def calculate(evaluator, left_result, operator, right_result):
+def calculate(evaluator, context, left_result, operator, right_result):
 result = set()
 if not left_result or not right_result:
 # illegal slices e.g. cause left/right_result to be None
@@ -80,7 +80,7 @@ def calculate(evaluator, left_result, operator, right_result):
 else:
 for left in left_result:
 for right in right_result:
-result |= _element_calculate(evaluator, left, operator, right)
+result |= _element_calculate(evaluator, context, left, operator, right)
 return result


@@ -125,7 +125,7 @@ def _is_list(obj):
 return isinstance(obj, iterable.AbstractSequence) and obj.array_type == 'list'


-def _element_calculate(evaluator, left, operator, right):
+def _element_calculate(evaluator, context, left, operator, right):
 from jedi.evaluate import iterable, instance
 l_is_num = _is_number(left)
 r_is_num = _is_number(right)
@@ -173,7 +173,7 @@ def _element_calculate(evaluator, left, operator, right):
 if operator in ('+', '-') and l_is_num != r_is_num \
 and not (check(left) or check(right)):
 message = "TypeError: unsupported operand type(s) for +: %s and %s"
-analysis.add(evaluator, 'type-error-operation', operator,
+analysis.add(context, 'type-error-operation', operator,
 message % (left, right))

 return set([left, right])
@@ -58,11 +58,11 @@ def execute(evaluator, obj, arguments):

 def _follow_param(evaluator, arguments, index):
 try:
-key, values = list(arguments.unpack())[index]
+key, lazy_context = list(arguments.unpack())[index]
 except IndexError:
 return set()
 else:
-return unite(evaluator.eval_element(v) for v in values)
+return lazy_context.infer()


 def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False):
@@ -219,7 +219,7 @@ def builtins_isinstance(evaluator, objects, types, arguments):
 message = 'TypeError: isinstance() arg 2 must be a ' \
 'class, type, or tuple of classes and types, ' \
 'not %s.' % cls_or_tup
-analysis.add(evaluator, 'type-error-isinstance', node, message)
+analysis.add(cls_or_tup, 'type-error-isinstance', node, message)

 return set(compiled.create(evaluator, x) for x in bool_results)

@@ -244,11 +244,12 @@ def collections_namedtuple(evaluator, obj, arguments):
 _fields = list(_follow_param(evaluator, arguments, 1))[0]
 if isinstance(_fields, compiled.CompiledObject):
 fields = _fields.obj.replace(',', ' ').split()
-elif isinstance(_fields, iterable.Array):
-try:
-fields = [v.obj for v in unite(_fields.py__iter__())]
-except AttributeError:
-return set()
+elif isinstance(_fields, iterable.AbstractSequence):
+fields = [
+v.obj
+for lazy_context in _fields.py__iter__()
+for v in lazy_context.infer() if hasattr(v, 'obj')
+]
 else:
 return set()

@@ -265,7 +266,7 @@ def collections_namedtuple(evaluator, obj, arguments):

 # Parse source
 generated_class = ParserWithRecovery(evaluator.grammar, unicode(source)).module.subscopes[0]
-return set([er.Class(evaluator, generated_class)])
+return set([er.ClassContext(evaluator, generated_class, evaluator.BUILTINS)])


 @argument_clinic('first, /')
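The `collections_namedtuple` hunks above infer the field names either from a plain string (the `CompiledObject` branch) or from a sequence literal (the `AbstractSequence` branch). A hedged example, not from the commit, of the two spellings that code has to understand:

# Illustration only: the two namedtuple call styles the field inference parses.
import collections

Point1 = collections.namedtuple('Point', 'x, y')       # fields given as one string
Point2 = collections.namedtuple('Point', ['x', 'y'])   # fields given as a sequence

print(Point1(1, 2), Point2(x=3, y=4))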
@@ -71,9 +71,12 @@ def pytest_generate_tests(metafunc):

 if 'static_analysis_case' in metafunc.fixturenames:
 base_dir = os.path.join(os.path.dirname(__file__), 'static_analysis')
+cases = list(collect_static_analysis_tests(base_dir, test_files))
 metafunc.parametrize(
 'static_analysis_case',
-collect_static_analysis_tests(base_dir, test_files))
+cases,
+ids=[c.name for c in cases]
+)


 def collect_static_analysis_tests(base_dir, test_files):
@@ -91,6 +94,7 @@ class StaticAnalysisCase(object):
 """
 def __init__(self, path):
 self._path = path
+self.name = os.path.basename(path)
 with open(path) as f:
 self._source = f.read()

@@ -98,7 +102,6 @@ class StaticAnalysisCase(object):
 for line in self._source.splitlines():
 self.skip = self.skip or run.skip_python_version(line)
-

 def collect_comparison(self):
 cases = []
 for line_nr, line in enumerate(self._source.splitlines(), 1):