1
0
forked from VimPlug/jedi

Break Interpreter completion even more in favor of a better solution in the future.

This commit is contained in:
Dave Halter
2016-06-03 19:31:42 +02:00
parent 0c7894b3e6
commit 5edcf47512
9 changed files with 73 additions and 30 deletions

View File

@@ -10,7 +10,6 @@ import re
import os
import warnings
import sys
import collections
from jedi._compatibility import unicode
from jedi.parser import load_grammar
@@ -135,6 +134,9 @@ class Script(object):
module = self._evaluator.wrap(parser.module)
imports.add_module(self._evaluator, unicode(module.name), module)
def _get_module(self):
return self._parser.module()
@property
def source_path(self):
"""
@@ -336,7 +338,7 @@ class Script(object):
definitions)
module = set([d.get_parent_until() for d in definitions])
module.add(self._parser.module())
module.add(self._get_module())
names = usages.usages(self._evaluator, definitions, module)
for d in set(definitions):
@@ -381,9 +383,9 @@ class Script(object):
def _analysis(self):
self._evaluator.is_analysis = True
self._evaluator.analysis_modules = [self._parser.module()]
self._evaluator.analysis_modules = [self._get_module()]
try:
for node in self._parser.module().nodes_to_execute():
for node in self._get_module().nodes_to_execute():
if node.type in ('funcdef', 'classdef'):
if node.type == 'classdef':
continue
@@ -440,8 +442,9 @@ class Interpreter(Script):
If `line` and `column` are None, they are assumed be at the end of
`source`.
"""
if type(namespaces) is not list or len(namespaces) == 0 or \
not all(isinstance(x, collections.Mapping) for x in namespaces):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
super(Interpreter, self).__init__(source, **kwds)
@@ -454,8 +457,12 @@ class Interpreter(Script):
self._orig_path, self._pos,
self._user_context, self._parsed_callback,
use_fast_parser=False)
interpreter.add_namespaces_to_parser(self._evaluator, namespaces,
self._parser.module())
#interpreter.add_namespaces_to_parser(self._evaluator, namespaces,
#self._get_module())
def _get_module(self):
parser_module = super(Interpreter, self)._get_module()
return interpreter.MixedModule(parser_module, self.namespaces)
def defined_names(source, path=None, encoding='utf-8'):
@@ -501,7 +508,7 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
# Set line/column to a random position, because they don't matter.
script = Script(source, line=1, column=0, path=path, encoding=encoding)
defs = [classes.Definition(script._evaluator, name_part)
for name_part in get_module_names(script._parser.module(), all_scopes)]
for name_part in get_module_names(script._get_module(), all_scopes)]
return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))

View File

@@ -93,7 +93,7 @@ class Completion:
Analyzes the context that a completion is made in and decides what to
return.
Could provide context for:
Could specialize completions for:
- from/import completions
- as nothing
- statements that start always on new line

View File

@@ -139,9 +139,15 @@ def get_stack_at_position(grammar, source, module, pos):
pass
def tokenize_without_endmarker(code):
for token_ in tokenize.source_tokens(code, use_exact_op_types=True):
tokens = tokenize.source_tokens(code, use_exact_op_types=True)
for token_ in tokens:
if token_[0] == token.ENDMARKER:
raise EndMarkerReached()
elif token_[0] == token.DEDENT:
# Ignore those. Error statements should not contain them, if
# they do it's for cases where an indentation happens and
# before the endmarker we still see them.
pass
else:
yield token_

View File

@@ -31,6 +31,23 @@ def add_namespaces_to_parser(evaluator, namespace_dicts, parser_module):
#arr.append(LazyName(evaluator, parser_module, key, value))
class MixedModule():
def __init__(self, evaluator, parser_module, namespaces):
self._evaluator = evaluator
self._parser_module = parser_module
self._namespaces = namespaces
def names_dicts(self):
for names_dict in self._parser_module.names_dicts():
yield names_dict
for namespace in self._namespaces:
print('ole')
yield mixed.MixedObject(self._evaluator, namespace, self._parser_module.name)
yield namespace
class LazyName(helpers.FakeName):
def __init__(self, evaluator, module, name, value):
super(LazyName, self).__init__(name)

View File

@@ -85,7 +85,8 @@ class Evaluator(object):
self.memoize_cache = {} # for memoize decorators
# To memorize modules -> equals `sys.modules`.
self.modules = {} # like `sys.modules`.
self.compiled_cache = {} # see `compiled.create()`
self.compiled_cache = {} # see `evaluate.compiled.create()`
self.mixed_cache = {} # see `evaluate.compiled.mixed.create()`
self.analysis = []
self.predefined_if_name_dict_dict = {}
self.is_analysis = False

View File

@@ -475,21 +475,35 @@ def get_special_object(evaluator, identifier):
return create(evaluator, obj, parent=create(evaluator, _builtins))
def compiled_objects_cache(func):
def compiled_objects_cache(attribute_name):
def decorator(func):
"""
This decorator caches just the ids, as opposed to caching the object itself.
Caching the id has the advantage that an object doesn't need to be
hashable.
"""
def wrapper(evaluator, obj, parent=None, module=None):
cache = getattr(evaluator, attribute_name)
# Do a very cheap form of caching here.
key = id(obj)
try:
return evaluator.compiled_cache[key][0]
return cache[key][0]
except KeyError:
# TODO this whole decorator looks way too ugly and this if
# doesn't make it better. Find a more generic solution.
if parent or module:
result = func(evaluator, obj, parent, module)
else:
result = func(evaluator, obj)
# Need to cache all of them, otherwise the id could be overwritten.
evaluator.compiled_cache[key] = result, obj, parent, module
cache[key] = result, obj, parent, module
return result
return wrapper
return decorator
@compiled_objects_cache
@compiled_objects_cache('compiled_cache')
def create(evaluator, obj, parent=None, module=None):
"""
A very weird interface class to this module. The more options provided the

View File

@@ -134,7 +134,7 @@ def find_syntax_node_name(evaluator, python_object):
return names[-1]
@memoize_default(evaluator_is_first_arg=True)
@compiled.compiled_objects_cache('mixed_cache')
def create(evaluator, obj):
name = find_syntax_node_name(evaluator, obj)
if name is None:

View File

@@ -62,8 +62,8 @@ def filter_definition_names(names, origin, position=None):
stmt = names[0].get_definition()
scope = stmt.get_parent_scope()
if not (isinstance(scope, er.FunctionExecution)
and isinstance(scope.base, er.LambdaWrapper)):
if not (isinstance(scope, er.FunctionExecution) and
isinstance(scope.base, er.LambdaWrapper)):
names = filter_after_position(names, position)
names = [name for name in names if name.is_definition()]

View File

@@ -226,8 +226,6 @@ class Parser(object):
def _tokenize(self, tokenizer):
for typ, value, start_pos, prefix in tokenizer:
if typ == ERRORTOKEN:
raise ParseError
yield typ, value, prefix, start_pos
def error_recovery(self, grammar, stack, arcs, typ, value, start_pos, prefix,