Make a lot of progress with typeshed/typing
@@ -217,6 +217,7 @@ def _get_item(context, index_contexts, contextualized_node):
            ContextSet.from_set(unused_contexts),
            contextualized_node
        )
    debug.dbg('py__getitem__ result: %s', result)
    return result

@@ -38,6 +38,7 @@ py__doc__(include_call_signature: Returns the docstring for a context.
====================================== ========================================

"""
from jedi import debug
from jedi._compatibility import use_metaclass
from jedi.parser_utils import get_parent_scope
from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass

@@ -150,7 +151,7 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
          File "<stdin>", line 1, in <module>
        TypeError: int() takes at most 2 arguments (3 given)
        """
        pass
        debug.warning('Super class of %s is not a class: %s', self, cls)
    else:
        add(cls)
        for cls_new in mro_method():

@@ -220,6 +221,7 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
        return ContextName(self, self.tree_node.name)

    def py__getitem__(self, index_context_set, contextualized_node):
        print(self.parent_context.__class__.__name__)
        for cls in list(self.py__mro__()):
            pass
        print('ha', self, list(self.py__mro__()))

@@ -3,90 +3,103 @@ We need to somehow work with the typing objects. Since the typing objects are
pretty bare we need to add all the Jedi customizations to make them work as
contexts.
"""
from parso.python import tree

from jedi import debug
from jedi.evaluate.compiled import builtin_from_name, CompiledObject
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context
from jedi.evaluate.context.iterable import SequenceLiteralContext
from jedi.evaluate.filters import FilterWrapper, NameWrapper
from jedi.evaluate.filters import FilterWrapper, NameWrapper, \
    AbstractTreeName

_PROXY_TYPES = 'Optional Union Callable Type ClassVar Tuple Generic Protocol'.split()
_PROXY_CLASS_TYPES = 'Tuple Generic Protocol'.split()
_TYPE_ALIAS_TYPES = 'List Dict DefaultDict Set FrozenSet Counter Deque ChainMap'.split()
_PROXY_TYPES = 'Optional Union Callable Type ClassVar'.split()

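For orientation, the constructs this module has to model are plain `typing` objects, which carry very little information at runtime; the split into _PROXY_TYPES, _PROXY_CLASS_TYPES and _TYPE_ALIAS_TYPES above mirrors how they are used. A small illustrative snippet of such user-level code (not part of the commit):

from typing import Any, List, Optional, TypeVar, Union

T = TypeVar('T')           # handled by the TypeVarClass/TypeVar contexts below
MaybeInt = Optional[int]   # Optional, Union, ... are listed in _PROXY_TYPES
Names = List[str]          # List, Dict, ... are listed in _TYPE_ALIAS_TYPES
Anything = Any             # Any is treated roughly like ``object`` (see Any below)
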
class _TypingBase(object):
    def __init__(self, name, typing_context):
class TypingName(AbstractTreeName):
    def __init__(self, context, other_name):
        super(TypingName, self).__init__(context.parent_context, other_name.tree_name)
        self._context = context

    def infer(self):
        return ContextSet(self._context)


class _BaseTypingContext(Context):
    def __init__(self, name):
        super(_BaseTypingContext, self).__init__(
            name.parent_context.evaluator,
            parent_context=name.parent_context,
        )
        self._name = name
        self._context = typing_context

    def __getattr__(self, name):
        return getattr(self._context, name)
    @property
    def name(self):
        return TypingName(self, self._name)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._context)
        return '%s(%s)' % (self.__class__.__name__, self._name.string_name)


class TypingModuleName(NameWrapper):
    def infer(self):
        return ContextSet.from_iterable(
            self._remap(context) for context in self._wrapped_name.infer()
        )
        return ContextSet.from_iterable(self._remap())

    def _remap(self, context):
    def _remap(self):
        # TODO we don't want the SpecialForm bullshit
        name = self.string_name
        print('name', name)
        if name in (_PROXY_TYPES + _TYPE_ALIAS_TYPES):
            print('NAME', name)
            return TypingProxy(name, context)
        evaluator = self.parent_context.evaluator
        if name in (_PROXY_CLASS_TYPES + _TYPE_ALIAS_TYPES):
            yield TypingClassContext(self)
        elif name == _PROXY_TYPES:
            yield TypingContext(self)
        elif name == 'runtime':
            # We don't want anything here, not sure what this function is
            # supposed to do, since it just appears in the stubs and shouldn't
            # have any effects there (because it's never executed).
            return
        elif name == 'TypeVar':
            return TypeVarClass(context.evaluator)
            yield TypeVarClass(evaluator)
        elif name == 'Any':
            return Any(context)
            yield Any()
        elif name == 'TYPE_CHECKING':
            # This is needed for e.g. imports that are only available for type
            # checking or are in cycles. The user can then check this variable.
            return builtin_from_name(context.evaluator, u'True')
            yield builtin_from_name(evaluator, u'True')
        elif name == 'overload':
            # TODO implement overload
            return context
            pass
        elif name == 'cast':
            # TODO implement cast
            return context
            for c in self._wrapped_name.infer():  # Fuck my life Python 2
                yield c
        elif name == 'TypedDict':
            # TODO implement
            # e.g. Movie = TypedDict('Movie', {'name': str, 'year': int})
            return context
            # TODO doesn't even exist in typeshed/typing.py, yet. But will be
            # added soon.
            pass
        elif name in ('no_type_check', 'no_type_check_decorator'):
            # This is not necessary, as long as we are not doing type checking.
            return context
            return context
            for c in self._wrapped_name.infer():  # Fuck my life Python 2
                yield c
        else:
            # Everything else shouldn't be relevant for type checking.
            for c in self._wrapped_name.infer():  # Fuck my life Python 2
                yield c

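The TYPE_CHECKING branch above maps the name to the builtin `True` so that guarded imports still resolve during inference. The pattern it is meant to support looks roughly like this (illustrative user code, not from the commit):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # False at runtime, but treated as True by the remapping above, so the
    # import below is still followed when inferring the annotation.
    from collections import OrderedDict


def first_key(mapping):
    # type: (OrderedDict) -> object
    return next(iter(mapping))
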
class TypingModuleFilterWrapper(FilterWrapper):
    name_wrapper_class = TypingModuleName


class TypingProxy(_TypingBase):
    py__simple_getitem__ = None

    def py__getitem__(self, index_context_set, contextualized_node):
        return ContextSet.from_iterable(
            TypingProxyWithIndex(self._name, self._context, index_context)
            for index_context in index_context_set
        )


class _WithIndexBase(_TypingBase):
    def __init__(self, name, class_context, index_context):
        super(_WithIndexBase, self).__init__(name, class_context)
class _WithIndexBase(_BaseTypingContext):
    def __init__(self, name, index_context):
        super(_WithIndexBase, self).__init__(name)
        self._index_context = index_context

    def __repr__(self):
        return '%s(%s, %s)' % (
        return '<%s: %s[%s]>' % (
            self.__class__.__name__,
            self._context,
            self._index_context
            self._name.string_name,
            self._index_context,
        )

    def _execute_annotations_for_all_indexes(self):

@@ -95,7 +108,7 @@ class _WithIndexBase(_TypingBase):
        ).execute_annotation()


class TypingProxyWithIndex(_WithIndexBase):
class TypingContextWithIndex(_WithIndexBase):
    def execute_annotation(self):
        name = self._name
        if name in _TYPE_ALIAS_TYPES:

@@ -122,6 +135,30 @@ class TypingProxyWithIndex(_WithIndexBase):
        return ContextSet(cls(name, self._context, self._index_context))


class TypingContext(_BaseTypingContext):
    index_class = TypingContextWithIndex
    py__simple_getitem__ = None

    def py__getitem__(self, index_context_set, contextualized_node):
        return ContextSet.from_iterable(
            self.index_class(self._name, index_context)
            for index_context in index_context_set
        )


class TypingClassMixin(object):
    def py__mro__(self):
        return (self,)


class TypingClassContextWithIndex(TypingClassMixin, TypingContextWithIndex):
    pass


class TypingClassContext(TypingClassMixin, TypingContext):
    index_class = TypingClassContextWithIndex


def _iter_over_arguments(maybe_tuple_context):
    if isinstance(maybe_tuple_context, SequenceLiteralContext):
        for lazy_context in maybe_tuple_context.py__iter__():

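`py__getitem__` above is the inference-side counterpart of subscribing a typing object, which at runtime is an ordinary `__getitem__` call; illustrative only, not from the commit:

import typing

alias = typing.List[int]            # subscription, i.e. List.__getitem__(int)
nested = typing.Dict[str, alias]    # subscriptions can nest arbitrarily
print(alias, nested)                # typing.List[int] typing.Dict[str, typing.List[int]]
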
@@ -175,16 +212,14 @@ class Tuple(_ContainerBase):


class Generic(_ContainerBase):
    # TODO implement typevars
    pass


# For pure type inference these two classes are basically the same. It's much
# more interesting once you do type checking.
Protocol = Generic
class Protocol(_ContainerBase):
    pass


class Any(_TypingBase):
class Any(_BaseTypingContext):
    def __init__(self):
        # Any is basically object, when it comes to type inference/completions.
        # This is obviously not correct, but let's just use this for now.

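The comment above refers to `typing.Generic` and `typing.Protocol`: for pure inference both are just parameterizable base classes, and the structural-subtyping side of Protocol only matters once real type checking is done. Illustrative user code (not from the commit):

from typing import Generic, TypeVar

T = TypeVar('T')


class Box(Generic[T]):
    def __init__(self, item):
        self.item = item


# For completions, Box(42).item is inferred the same way whether Box
# derives from Generic[T] or from a Protocol.
box = Box(42)
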
@@ -240,20 +275,28 @@ class TypeVar(Context):
    def __init__(self, evaluator, name, unpacked_args):
        super(TypeVar, self).__init__(evaluator)
        self._name = name
        self._unpacked_args = unpacked_args

    def _unpack(self):
        # TODO
        constraints = ContextSet()
        bound = None
        covariant = False
        contravariant = False
        for key, lazy_context in unpacked:
        self._constraints_lazy_contexts = []
        self._bound_lazy_context = None
        self._covariant_lazy_context = None
        self._contravariant_lazy_context = None
        for key, lazy_context in unpacked_args:
            if key is None:
                constraints |= lazy_context.infer()
                self._constraints_lazy_contexts.append(lazy_context)
            else:
                if name == 'bound':
                    bound = lazy_context.infer()
                if key == 'bound':
                    self._bound_lazy_context = lazy_context
                elif key == 'covariant':
                    self._covariant_lazy_context = lazy_context
                elif key == 'contravariant':
                    self._contra_variant_lazy_context = lazy_context
                else:
                    debug.warning('Invalid TypeVar param name %s', key)

    def execute_annotation(self):
        if self._bound_lazy_context is not None:
            return self._bound_lazy_context.infer()
        return NO_CONTEXTS

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._name)

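For reference, the argument shapes the new `__init__` above has to unpack lazily: positional arguments are constraints, and `bound`, `covariant` and `contravariant` are the recognized keywords (plain `typing` usage, not from the commit):

from typing import TypeVar

T = TypeVar('T')                         # no constraints, no bound
AnyStr = TypeVar('AnyStr', str, bytes)   # positional constraints
TNum = TypeVar('TNum', bound=float)      # keyword bound
T_co = TypeVar('T_co', covariant=True)   # variance flags, stored but not used in the shown code

With a bound, the new execute_annotation() falls back to inferring the bound, so e.g. TNum should behave like a float during completion.
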
@@ -10,9 +10,9 @@ from jedi.evaluate.base_context import ContextSet, iterator_to_context_set
from jedi.evaluate.filters import AbstractTreeName, ParserTreeFilter, \
    TreeNameDefinition
from jedi.evaluate.context import ModuleContext, FunctionContext, ClassContext
from jedi.evaluate.context.typing import TypingModuleFilterWrapper
from jedi.evaluate.context.typing import TypingModuleFilterWrapper, \
    TypingModuleName
from jedi.evaluate.compiled import CompiledObject
from jedi.evaluate.syntax_tree import tree_name_to_contexts
from jedi.evaluate.utils import to_list

@@ -164,27 +164,22 @@ class TypeshedPlugin(BasePlugin):
        return wrapper


class StubName(TreeNameDefinition):
class NameWithStub(TreeNameDefinition):
    """
    This name is only here to mix stub names with non-stub names. The idea is
    that the user can goto the actual name, but end up on the definition of the
    stub when inferring types.
    """

    def __init__(self, parent_context, tree_name, stub_parent_context, stub_tree_name):
        super(StubName, self).__init__(parent_context, tree_name)
        self._stub_parent_context = stub_parent_context
        self._stub_tree_name = stub_tree_name
    def __init__(self, parent_context, tree_name, stub_name):
        super(NameWithStub, self).__init__(parent_context, tree_name)
        self._stub_name = stub_name

    @memoize_method
    @iterator_to_context_set
    def infer(self):
        actual_contexts = super(StubName, self).infer()
        stub_contexts = tree_name_to_contexts(
            self.parent_context.evaluator,
            self._stub_parent_context,
            self._stub_tree_name
        )
        actual_contexts = super(NameWithStub, self).infer()
        stub_contexts = self._stub_name.infer()

        if not actual_contexts:
            for c in stub_contexts:

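The docstring describes the intent: goto-style navigation should land on the non-stub definition, while inference uses the stub information, and only the beginning of `infer()` is visible before the hunk is cut off. A rough standalone sketch of the visible fallback, with hypothetical names:

def _infer_with_stub_fallback(actual_contexts, stub_contexts):
    # Hypothetical standalone version of the visible part of
    # NameWithStub.infer(): if the non-stub name infers to nothing,
    # fall back to whatever the stub name provides.  How the two result
    # sets are combined when the non-stub side is non-empty is not
    # visible in this hunk.
    if not actual_contexts:
        return list(stub_contexts)
    return list(actual_contexts)  # assumption only; see note above
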
@@ -217,13 +212,24 @@ class StubName(TreeNameDefinition):


class StubParserTreeFilter(ParserTreeFilter):
    name_class = StubName
    name_class = NameWithStub

    def __init__(self, non_stub_filters, *args, **kwargs):
        self._search_global = kwargs.pop('search_global')  # Python 2 :/
        super(StubParserTreeFilter, self).__init__(*args, **kwargs)
        self._non_stub_filters = non_stub_filters

    def get(self, name):
        try:
            names = self._used_names[name]
        except KeyError:
            return self._get_non_stub_names(name)

        return self._convert_names(self._filter(names))

    # TODO maybe implement values, because currently the names that don't exist
    # in the stub file are not part of values.

    def _check_flows(self, names):
        return names

@@ -241,18 +247,20 @@ class StubParserTreeFilter(ParserTreeFilter):
            # Try to match the names of stubs with non-stubs. If there's no
            # match, just use the stub name. The user will be directed there
            # for all API accesses. Otherwise the user will be directed to the
            # non-stub positions (see StubName).
            if not len(non_stub_names):
                yield TreeNameDefinition(self.context, name)
            else:
            # non-stub positions (see NameWithStub).
            n = TreeNameDefinition(self.context, name)
            if isinstance(self.context, TypingModuleWrapper):
                n = TypingModuleName(n)
            if len(non_stub_names):
                for non_stub_name in non_stub_names:
                    assert isinstance(non_stub_name, AbstractTreeName), non_stub_name
                    yield self.name_class(
                        non_stub_name.parent_context,
                        non_stub_name.tree_name,
                        self.context,
                        name,
                        stub_name=n,
                    )
            else:
                yield n

    def _is_name_reachable(self, name):
        if not super(StubParserTreeFilter, self)._is_name_reachable(name):