
Progress and actually passing a few tests.

Dave Halter
2016-05-26 00:10:54 +02:00
parent d4a10929e2
commit cbba314286
7 changed files with 169 additions and 61 deletions

View File

@@ -162,8 +162,9 @@ class Script(object):
             self._evaluator, self._parser, self._user_context,
             self._pos, self.call_signatures
         )
+        completions = completion.completions(path)
         debug.speed('completions end')
-        return completion.completions(path)
+        return completions
 
     def goto_definitions(self):
         """

View File

@@ -123,20 +123,38 @@ class Completion:
         helpers.check_error_statements(module, self._pos)
 
         grammar = self._evaluator.grammar
-        stack = helpers.get_stack_at_position(grammar, module, self._pos)
+
+        # Now we set the position to the place where we try to find out what we
+        # have before it.
+        pos = self._pos
+        if completion_parts.name:
+            pos = pos[0], pos[1] - len(completion_parts.name)
+
+        stack = helpers.get_stack_at_position(grammar, module, pos)
         allowed_keywords, allowed_tokens = \
             helpers.get_possible_completion_types(grammar, stack)
+        print(allowed_keywords, allowed_tokens)
 
         completion_names = list(self._get_keyword_completion_names(allowed_keywords))
 
-        if token.NAME in allowed_tokens:
-            # Differentiate between import names and other names.
-            completion_names += self._simple_complete(completion_parts)
+        if token.NAME in allowed_tokens:
+            # This means that we actually have to do type inference.
+            symbol_names = list(stack.get_node_names(grammar))
+            if "import_stmt" in symbol_names:
+                if "dotted_name" in symbol_names:
+                    completion_names += self._complete_dotted_name(stack, module)
+            else:
+                completion_names += self._simple_complete(completion_parts)
         """
         completion_names = []
         if names is not None:
             imp_names = tuple(str(n) for n in names if n.end_pos < self._pos)
             i = imports.Importer(self._evaluator, imp_names, module, level)
             completion_names = i.completion_names(self._evaluator, only_modules)
         """
         return completion_names
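One detail in the hunk above is easy to miss: the grammar stack is computed at the position just before the partially typed name (self._pos shifted back by len(completion_parts.name)), so it reflects the context the fragment is being completed in. A minimal sketch of that (line, column) arithmetic, with a plain string standing in for completion_parts.name:

def stack_position(cursor_pos, typed_name):
    # cursor_pos is a (line, column) tuple; typed_name is the fragment that
    # has already been typed, e.g. "jo" in "import jo<cursor>".
    line, column = cursor_pos
    if typed_name:
        column -= len(typed_name)   # step back to just before the fragment
    return line, column

stack_position((1, 9), "jo")   # -> (1, 7), the column right after "import "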
@@ -170,9 +188,9 @@ class Completion:
             completion_names += self._simple_complete(completion_parts)
         return completion_names
 
-    def _get_keyword_completion_names(self, keywords):
-        for keyword in keywords:
-            yield keywords.keyword(self._evaluator, keyword).name
+    def _get_keyword_completion_names(self, keywords_):
+        for k in keywords_:
+            yield keywords.keyword(self._evaluator, k).name
 
     def _simple_complete(self, completion_parts):
         if not completion_parts.path and not completion_parts.has_dot:
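The parameter rename above (keywords to keywords_) avoids shadowing the keywords name that the body still needs for keywords.keyword(...). A stand-alone illustration of the same class of bug, using the stdlib keyword module as a stand-in:

import keyword  # stdlib module, stand-in for the keywords module used above

def broken(keyword):
    # The parameter hides the module, so this raises AttributeError
    # when a list of keyword strings is passed in.
    return keyword.iskeyword('if')

def fixed(keyword_list):
    return keyword.iskeyword('if')  # the module is still reachable

fixed(['if', 'for'])    # -> True; broken(['if', 'for']) would raise AttributeError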
@@ -211,3 +229,18 @@ class Completion:
             names, self._parser.user_stmt()
         )
         return completion_names
+
+    def _complete_dotted_name(self, stack, module):
+        nodes = list(stack.get_nodes())
+
+        level = 0
+        for i, node in enumerate(nodes[1:], 1):
+            if node in ('.', '...'):
+                level += len(node.value)
+            else:
+                names = [str(n) for n in nodes[i::2]]
+                break
+
+        print(names, nodes)
+        i = imports.Importer(self._evaluator, names, module, level)
+        return i.completion_names(self._evaluator, only_modules=True)
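The new _complete_dotted_name walks the nodes of the unfinished import: leading '.' / '...' nodes raise the relative-import level, and once the first name appears, every second remaining node belongs to the dotted name. A small stand-alone version of that bookkeeping, with plain strings in place of parser leaves (so len(node) replaces len(node.value)):

def split_dotted_import(nodes):
    # nodes is a flat sequence such as ['from', '.', '.', 'pkg', '.', 'mod'].
    level = 0
    names = []
    for i, node in enumerate(nodes[1:], 1):
        if node in ('.', '...'):
            level += len(node)          # '.' adds 1, '...' adds 3
        else:
            names = [str(n) for n in nodes[i::2]]   # names alternate with '.'
            break
    return level, names

split_dotted_import(['from', '.', '.', 'pkg', '.', 'mod'])   # -> (2, ['pkg', 'mod'])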

View File

@@ -13,6 +13,7 @@ from jedi.parser import tokenize, token
 CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
 
+
 def get_completion_parts(path_until_cursor):
     """
     Returns the parts for the completion
@@ -42,25 +43,28 @@ def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False)
 def check_error_statements(module, pos):
-    for error_statement in module.error_statement_stacks:
+    for error_statement in module.error_statements:
         if error_statement.first_type in ('import_from', 'import_name') \
-                and error_statement.first_pos < pos <= error_statement.next_start_pos:
+                and error_statement.start_pos < pos <= error_statement.end_pos:
             return importer_from_error_statement(error_statement, pos)
     return None, 0, False, False
 
 
-def get_code_until(code, start_pos, end_pos):
+def get_code_until(code, code_start_pos, end_pos):
+    """
+    :param code_start_pos: is where the code starts.
+    """
     lines = common.splitlines(code)
-    line_difference = end_pos[0] - start_pos[0]
+    line_difference = end_pos[0] - code_start_pos[0]
     if line_difference == 0:
-        end_line_length = end_pos[1] - start_pos[1]
+        end_line_length = end_pos[1] - code_start_pos[1]
     else:
         end_line_length = end_pos[1]
 
-    if line_difference > len(lines) or end_line_length > len(lines[-1]):
+    if line_difference > len(lines) or end_line_length > len(lines[line_difference]):
         raise ValueError("The end_pos seems to be after the code part.")
 
-    new_lines = lines[:line_difference] + [lines[-1][:end_line_length]]
+    new_lines = lines[:line_difference] + [lines[line_difference][:end_line_length]]
     return '\n'.join(new_lines)
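get_code_until trims a code fragment that starts at code_start_pos down to the cursor position; the changes above also fix the line index (lines[line_difference] instead of lines[-1]) so fragments spanning several lines are cut at the right line. The same slicing logic as a stand-alone sketch, using str.splitlines in place of common.splitlines:

def code_until(code, code_start_pos, end_pos):
    lines = code.splitlines()
    line_difference = end_pos[0] - code_start_pos[0]
    if line_difference == 0:
        # Same line: the column is relative to where the fragment starts.
        end_line_length = end_pos[1] - code_start_pos[1]
    else:
        end_line_length = end_pos[1]
    if line_difference > len(lines) or end_line_length > len(lines[line_difference]):
        raise ValueError("The end_pos seems to be after the code part.")
    return '\n'.join(lines[:line_difference] + [lines[line_difference][:end_line_length]])

# A fragment that starts at line 10, column 0 of a file, cut at cursor (11, 6):
code_until("import os\nimport sys", (10, 0), (11, 6))   # -> 'import os\nimport'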
@@ -68,30 +72,56 @@ def get_stack_at_position(grammar, module, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
for error_statement in module.error_statement_stacks:
if error_statement.first_pos < pos <= error_statement.next_start_pos:
code = error_statement.get_code()
code = get_code_until(code, error_statement.first_pos, pos)
break
user_stmt = module.get_statement_for_position(pos)
if user_stmt is None:
# If there's no error statement and we're just somewhere, we want
# completions for just whitespace.
code = ''
for error_statement in module.error_statements:
if error_statement.start_pos < pos <= error_statement.end_pos:
code = error_statement.get_code(include_prefix=False)
start_pos = error_statement.start_pos
break
else:
raise NotImplementedError
code = user_stmt.get_code_with_error_statements(include_prefix=False)
start_pos = user_stmt.start_pos
# Remove indentations.
code = code.lstrip()
code = get_code_until(code, start_pos, pos)
# Remove whitespace at the end.
code = code.rstrip()
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
for token_ in tokenize.source_tokens(code):
for token_ in tokenize.source_tokens(code, use_exact_op_types=True):
if token_[0] == token.ENDMARKER:
raise EndMarkerReached()
else:
print(token_, token.tok_name[token_[0]])
yield token_
p = parser.Parser(grammar, code, tokenizer=tokenize_without_endmarker(code),
print(repr(code))
p = parser.Parser(grammar, code,
start_parsing=False)
try:
p.parse()
p.parse(tokenizer=tokenize_without_endmarker(code))
except EndMarkerReached:
return p.pgen_parser.stack
return Stack(p.pgen_parser.stack)
class Stack(list):
def get_node_names(self, grammar):
for dfa, state, (node_number, nodes) in self:
yield grammar.number2symbol[node_number]
def get_nodes(self):
for dfa, state, (node_number, nodes) in self:
for node in nodes:
yield node
def get_possible_completion_types(grammar, stack):
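The parse-until-the-cursor trick above feeds the pgen parser a token stream that raises a private exception as soon as the ENDMARKER token shows up; the caller catches it and keeps the partially built parser stack. A generic sketch of that interrupt-the-consumer pattern (plain lists and strings here, not jedi's tokenizer or parser API):

class EndMarkerReached(Exception):
    pass

def tokens_without_endmarker(tokens, endmarker='<END>'):
    # Yield tokens until the sentinel, then abort whoever is consuming us.
    for tok in tokens:
        if tok == endmarker:
            raise EndMarkerReached()
        yield tok

def consume(tokens):
    stack = []                          # stands in for p.pgen_parser.stack
    try:
        for tok in tokens_without_endmarker(tokens):
            stack.append(tok)           # "parse" one token at a time
    except EndMarkerReached:
        pass                            # stop exactly where the input ended
    return stack

consume(['import', 'os', '<END>'])      # -> ['import', 'os']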

View File

@@ -24,7 +24,7 @@ def type_inference(evaluator, parser, user_context, position, dotted_path, is_co
         # matched to much.
         return []
 
-    if isinstance(user_stmt, tree.Import):
+    if isinstance(user_stmt, tree.Import) and not is_completion:
         i, _ = helpers.get_on_import_stmt(evaluator, user_context,
                                           user_stmt, is_completion)
         if i is None:
@@ -36,12 +36,13 @@ def type_inference(evaluator, parser, user_context, position, dotted_path, is_co
     if eval_stmt is None:
         return []
 
-    module = evaluator.wrap(parser.module())
-    names, level, _, _ = helpers.check_error_statements(module, position)
-    if names:
-        names = [str(n) for n in names]
-        i = imports.Importer(evaluator, names, module, level)
-        return i.follow()
+    if not is_completion:
+        module = evaluator.wrap(parser.module())
+        names, level, _, _ = helpers.check_error_statements(module, position)
+        if names:
+            names = [str(n) for n in names]
+            i = imports.Importer(evaluator, names, module, level)
+            return i.follow()
 
     scopes = evaluator.eval_element(eval_stmt)