forked from VimPlug/jedi
Starting to create a way to make completions context-sensitive.
This involves working heavily with the pgen2 parser: we use its stack to check which tokens/keywords are possible at the cursor position.
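The idea in a nutshell, sketched here against the stdlib's lib2to3 copy of pgen2 rather than jedi's parser fork (an illustration only, not code from this commit): feed the parser every token before the cursor, stop before ENDMARKER, and read the allowed keywords and token types off the arcs of the DFA on top of the parser stack. possible_completion_types() below is a rough analogue of the new helpers.get_possible_completion_types(); pygram.python_grammar, parse.Parser, setup(), addtoken() and Grammar's labels/dfas/keywords/tokens are stdlib lib2to3 internals, not jedi code.

# Sketch only -- mirrors the new helpers on top of lib2to3's pgen2.
from lib2to3 import pygram
from lib2to3.pgen2 import parse, token


def possible_completion_types(grammar, stack):
    # Inverse maps: label index -> keyword string / token type number.
    inversed_keywords = dict((v, k) for k, v in grammar.keywords.items())
    inversed_tokens = dict((v, k) for k, v in grammar.tokens.items())
    keywords, token_types = [], []

    def add_results(label_index):
        if label_index in inversed_tokens:
            token_types.append(inversed_tokens[label_index])
        elif label_index in inversed_keywords:
            keywords.append(inversed_keywords[label_index])
        elif label_index != 0:  # label 0 only marks an accepting state
            # A nonterminal symbol: recurse into the first set of its DFA.
            t, v = grammar.labels[label_index]
            states, first = grammar.dfas[t]
            for first_label_index in first:
                add_results(first_label_index)

    dfa, state, node = stack[-1]
    states, first = dfa
    for label_index, new_state in states[state]:  # walk the current arcs
        add_results(label_index)
    return keywords, token_types


grammar = pygram.python_grammar
p = parse.Parser(grammar)
p.setup()
# Feed the tokens of the incomplete statement "x = ", then simply stop
# instead of letting the parser see ENDMARKER (the same trick as
# tokenize_without_endmarker() in the diff below).
p.addtoken(token.NAME, "x", ("", (1, 0)))
p.addtoken(token.EQUAL, "=", ("", (1, 2)))

kw, toks = possible_completion_types(grammar, p.stack)
print(sorted(set(kw)))      # e.g. ['lambda', 'not', 'yield'] (grammar-version dependent)
print(token.NAME in toks)   # True: a plain name may follow "x = "

After "x = " the grammar allows exactly the expression-starting keywords, plus ordinary tokens such as NAME; that is the information the commit turns into completion candidates.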
@@ -186,7 +186,7 @@ class Script(object):
            return new_defs

        goto_path = self._user_context.get_path_under_cursor()
-       context = self._user_context.get_context()
+       context = self._user_context.get_reverse_context()
        definitions = []
        if next(context) in ('class', 'def'):
            definitions = [self._evaluator.wrap(self._parser.user_scope())]
@@ -253,7 +253,7 @@ class Script(object):
            return definitions

        goto_path = self._user_context.get_path_under_cursor()
-       context = self._user_context.get_context()
+       context = self._user_context.get_reverse_context()
        user_stmt = self._parser.user_stmt()
        user_scope = self._parser.user_scope()

@@ -1,6 +1,7 @@
 from itertools import chain
 import re

+from jedi.parser import token
 from jedi.parser import tree
 from jedi import debug
 from jedi import settings
@@ -70,7 +71,7 @@ class Completion:

        user_stmt = self._parser.user_stmt_with_whitespace()

-       completion_names = self.get_completions(user_stmt, completion_parts)
+       completion_names = self._get_context_completions(user_stmt, completion_parts)

        if not completion_parts.has_dot:
            call_signatures = self._call_signatures_method()
@@ -85,30 +86,73 @@ class Completion:
                                                  x.name.startswith('_'),
                                                  x.name.lower()))

-   def get_completions(self, user_stmt, completion_parts):
-       # TODO this closure is ugly. it also doesn't work with
-       # simple_complete (used for Interpreter), somehow redo.
+   def _get_context_completions(self, user_stmt, completion_parts):
+       """
+       Analyzes the context that a completion is made in and decides what to
+       return.
+
+       Could provide context for:
+       - from/import completions
+       - as: nothing
+       - statements that always start on a new line:
+         'import', 'class', 'def', 'try', 'except',
+         'finally', 'while', 'with'
+       - statements that start on a new line, or after ';' or ':':
+         return raise continue break del pass global nonlocal assert
+       - def/class: nothing
+       - async for/def/with
+       - \n@/del/return/raise: no keyword (after a keyword, no keyword)?
+       - after keyword
+       - continue/break/pass: nothing
+       - global/nonlocal: search global
+       - after operator, no keyword: return
+       - yield: like return + after ( and =
+       - almost always ok:
+         'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
+       - after operations, no keyword:
+         + = * ** - etc.  Maybe work with the parser state?
+
+       # hard:
+       - await
+       - yield from / raise from / from import difference
+       - in args: */**: no completion
+       - in params (also lambda): no completion before =
+       """
+       module = self._evaluator.wrap(self._parser.module())
        names, level, only_modules, unfinished_dotted = \
            helpers.check_error_statements(module, self._pos)

+       grammar = self._evaluator.grammar
+       stack = helpers.get_stack_at_position(grammar, module, self._pos)
+       allowed_keywords, allowed_tokens = \
+           helpers.get_possible_completion_types(grammar, stack)
+
+       completion_names = list(self._get_keyword_completion_names(allowed_keywords))
+       if token.NAME in allowed_tokens:
+           # Differentiate between import names and other names.
+           completion_names += self._simple_complete(completion_parts)
+
-       completion_names = []
        if names is not None:
            imp_names = tuple(str(n) for n in names if n.end_pos < self._pos)
            i = imports.Importer(self._evaluator, imp_names, module, level)
            completion_names = i.completion_names(self._evaluator, only_modules)
+           return completion_names

+       # TODO this paragraph is necessary, but not sure it works.
-       context = self._user_context.get_context()
-       if not next(context).startswith('.'):  # skip the path
-           if next(context) == 'from':
-               # the completion is just "import" if "from" stands before it
-               if unfinished_dotted:
-                   return completion_names
-               else:
-                   return [keywords.keyword(self._evaluator, 'import').name]
+       context = self._user_context.get_backwards_context_tokens()
+       x = next(context, None)
+       #print(x)
+       #if not x.string.startswith('.'):  # skip the path
+       if next(context, None).string == 'from':
+           # the completion is just "import" if "from" stands before it
+           if unfinished_dotted:
+               return completion_names
+           else:
+               return [keywords.keyword(self._evaluator, 'import').name]

        if isinstance(user_stmt, tree.Import):
            module = self._parser.module()
            completion_names += imports.completion_names(self._evaluator,
                                                         user_stmt, self._pos)
        return completion_names
@@ -126,6 +170,10 @@ class Completion:
            completion_names += self._simple_complete(completion_parts)
        return completion_names

+   def _get_keyword_completion_names(self, keywords_):
+       # The parameter is named keywords_ so that it does not shadow the
+       # keywords module used below.
+       for k in keywords_:
+           yield keywords.keyword(self._evaluator, k).name
+
    def _simple_complete(self, completion_parts):
        if not completion_parts.path and not completion_parts.has_dot:
            scope = self._parser.user_scope()
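To make the intended behaviour concrete: after these changes, completing right behind "from os " should offer nothing but the import keyword, because the error-statement path above returns early with exactly that keyword name. A hypothetical check using the Script API of this era (an untested sketch of the expected result, not output from this commit; "example.py" and the position are made up):

import jedi

# Complete "from os " at line 1, column 8 (just after the trailing space);
# the new _get_context_completions() is expected to return only 'import'.
script = jedi.Script("from os ", 1, 8, "example.py")
print([c.name for c in script.completions()])   # expected: ['import']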
@@ -4,8 +4,11 @@ Helpers for the API
 import re
 from collections import namedtuple

+from jedi import common
 from jedi.parser import tree as pt
 from jedi.evaluate import imports
+from jedi import parser
+from jedi.parser import tokenize, token


 CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
@@ -46,6 +49,83 @@ def check_error_statements(module, pos):
    return None, 0, False, False


+def get_code_until(code, start_pos, end_pos):
+   lines = common.splitlines(code)
+   line_difference = end_pos[0] - start_pos[0]
+   if line_difference == 0:
+       end_line_length = end_pos[1] - start_pos[1]
+   else:
+       end_line_length = end_pos[1]
+
+   if line_difference >= len(lines) or end_line_length > len(lines[line_difference]):
+       raise ValueError("The end_pos seems to be after the code part.")
+
+   new_lines = lines[:line_difference] + [lines[line_difference][:end_line_length]]
+   return '\n'.join(new_lines)
+
+
+def get_stack_at_position(grammar, module, pos):
+   """
+   Returns the parser stack at the given position; its entries name the
+   grammar nodes being parsed (e.g. import_from, xor_test or yield_stmt).
+   """
+   for error_statement in module.error_statement_stacks:
+       if error_statement.first_pos < pos <= error_statement.next_start_pos:
+           code = error_statement.get_code()
+           code = get_code_until(code, error_statement.first_pos, pos)
+           break
+   else:
+       raise NotImplementedError
+
+   class EndMarkerReached(Exception):
+       pass
+
+   def tokenize_without_endmarker(code):
+       for token_ in tokenize.source_tokens(code):
+           if token_[0] == token.ENDMARKER:
+               raise EndMarkerReached()
+           else:
+               yield token_
+
+   p = parser.Parser(grammar, code, tokenizer=tokenize_without_endmarker(code),
+                     start_parsing=False)
+   try:
+       p.parse()
+   except EndMarkerReached:
+       return p.pgen_parser.stack
+
+
+def get_possible_completion_types(grammar, stack):
+   def add_results(label_index):
+       try:
+           grammar_labels.append(inversed_tokens[label_index])
+       except KeyError:
+           try:
+               keywords.append(inversed_keywords[label_index])
+           except KeyError:
+               t, v = grammar.labels[label_index]
+               assert t >= 256
+               # It's a symbol; check whether we're in its first set.
+               itsdfa = grammar.dfas[t]
+               itsstates, itsfirst = itsdfa
+               for first_label_index in itsfirst.keys():
+                   add_results(first_label_index)
+
+   dfa, state, node = stack[-1]
+   states, first = dfa
+   arcs = states[state]
+
+   inversed_keywords = dict((v, k) for k, v in grammar.keywords.items())
+   inversed_tokens = dict((v, k) for k, v in grammar.tokens.items())
+
+   keywords = []
+   grammar_labels = []
+   for label_index, new_state in arcs:
+       add_results(label_index)
+
+   return keywords, grammar_labels
+
+
 def importer_from_error_statement(error_statement, pos):
    def check_dotted(children):
        for name in children[::2]:
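As a quick illustration of get_code_until() from the hunk above, here is a standalone restatement that pins down its semantics (sketch only: str.splitlines() stands in for jedi.common.splitlines(), the bounds check is dropped, and the positions in the examples are made up). Since `code` begins exactly at start_pos, only the line delta and the end column matter:

def get_code_until(code, start_pos, end_pos):
    # code is assumed to start exactly at start_pos, as at the call site
    # in get_stack_at_position() above.
    lines = code.splitlines()
    line_difference = end_pos[0] - start_pos[0]
    if line_difference == 0:
        end_line_length = end_pos[1] - start_pos[1]
    else:
        end_line_length = end_pos[1]
    return '\n'.join(lines[:line_difference]
                     + [lines[line_difference][:end_line_length]])

# An error statement starting at (5, 4), cut at cursor position (6, 2):
# keep its whole first line plus two columns of the second one.
assert get_code_until("foo(\nbar", (5, 4), (6, 2)) == "foo(\nba"
# Same line: only the column difference is kept.
assert get_code_until("foo(ba", (5, 4), (5, 7)) == "foo"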