Mirror of https://github.com/davidhalter/jedi.git, synced 2025-12-22 21:31:26 +08:00

Commit: Merge branch 'dict', fixes #951
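
In short, this merge adds dict-key completion: completing inside `dct["` now offers the dictionary's literal keys as string completions. A minimal sketch of the resulting behaviour, modelled on the tests added in this diff (output is indicative; regular global completions are mixed in as well):

import jedi

source = '''strs = {'asdf': 1, 'foo': 2}
strs['''
# Column 5 of line 2 is just after the opening bracket.
completions = jedi.Script(source).complete(line=2, column=5)
print([c.complete for c in completions])  # roughly ["'asdf'", "'foo'", ...]
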
@@ -10,6 +10,7 @@ from jedi import settings
 from jedi.api import classes
 from jedi.api import helpers
 from jedi.api import keywords
+from jedi.api.strings import complete_dict
 from jedi.api.file_name import complete_file_name
 from jedi.inference import imports
 from jedi.inference.base_value import ValueSet
@@ -59,6 +60,11 @@ def filter_names(inference_state, completion_names, stack, like_name, fuzzy):
                 yield new


+def _remove_duplicates(completions, other_completions):
+    names = {d.name for d in other_completions}
+    return [c for c in completions if c.name not in names]
+
+
 def get_user_context(module_context, position):
     """
     Returns the scope in which the user resides. This includes flows.
@@ -95,36 +101,52 @@ class Completion:
         # The actual cursor position is not what we need to calculate
         # everything. We want the start of the name we're on.
         self._original_position = position
-        self._position = position[0], position[1] - len(self._like_name)
         self._signatures_callback = signatures_callback

         self._fuzzy = fuzzy

     def complete(self, fuzzy):
-        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
-        string, start_leaf = _extract_string_while_in_string(leaf, self._position)
-        if string is not None:
-            completions = list(complete_file_name(
+        leaf = self._module_node.get_leaf_for_position(
+            self._original_position,
+            include_prefixes=True
+        )
+        string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
+
+        prefixed_completions = complete_dict(
+            self._module_context,
+            self._code_lines,
+            start_leaf or leaf,
+            self._original_position,
+            None if string is None else quote + string,
+            fuzzy=fuzzy,
+        )
+
+        if string is not None and not prefixed_completions:
+            prefixed_completions = list(complete_file_name(
                 self._inference_state, self._module_context, start_leaf, string,
                 self._like_name, self._signatures_callback,
                 self._code_lines, self._original_position,
                 fuzzy
             ))
-            if completions:
-                return completions
+        if string is not None:
+            return prefixed_completions

         completion_names = self._complete_python(leaf)

-        completions = filter_names(self._inference_state, completion_names,
-                                   self.stack, self._like_name, fuzzy)
+        completions = list(filter_names(self._inference_state, completion_names,
+                                        self.stack, self._like_name, fuzzy))

-        return sorted(completions, key=lambda x: (x.name.startswith('__'),
+        return (
+            # Removing duplicates mostly to remove False/True/None duplicates.
+            _remove_duplicates(prefixed_completions, completions)
+            + sorted(completions, key=lambda x: (x.name.startswith('__'),
                                                  x.name.startswith('_'),
                                                  x.name.lower()))
+        )

     def _complete_python(self, leaf):
         """
-        Analyzes the value that a completion is made in and decides what to
+        Analyzes the current context of a completion and decides what to
        return.

        Technically this works by generating a parser stack and analysing the
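
The new return value above lists dict/string completions first and then appends the regular completions, sorted, with duplicates stripped. A self-contained illustration of that ordering, using a hypothetical stand-in class (plain Python, not jedi API):

class FakeCompletion:
    def __init__(self, name):
        self.name = name

# Completing e.g. `keywords[Tr` can yield 'True' both as a dict key and as a
# keyword; the same filter as _remove_duplicates drops the prefixed copy.
prefixed = [FakeCompletion('True'), FakeCompletion("'a'")]
regular = [FakeCompletion('True'), FakeCompletion('tuple')]
names = {c.name for c in regular}
merged = [c for c in prefixed if c.name not in names] + regular
print([c.name for c in merged])  # ["'a'", 'True', 'tuple']
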
@@ -139,6 +161,10 @@ class Completion:

         grammar = self._inference_state.grammar
         self.stack = stack = None
+        self._position = (
+            self._original_position[0],
+            self._original_position[1] - len(self._like_name)
+        )

         try:
             self.stack = stack = helpers.get_stack_at_position(
@@ -191,9 +217,12 @@ class Completion:

         completion_names = []
         current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
-        if not current_line or current_line[-1] in ' \t.;' \
-                and current_line[-3:] != '...':
-            completion_names += self._complete_keywords(allowed_transitions)
+
+        completion_names += self._complete_keywords(
+            allowed_transitions,
+            only_values=not (not current_line or current_line[-1] in ' \t.;'
+                             and current_line[-3:] != '...')
+        )

         if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
                                                   PythonTokenTypes.INDENT)):
@@ -201,13 +230,7 @@ class Completion:

         nonterminals = [stack_node.nonterminal for stack_node in stack]

-        nodes = []
-        for stack_node in stack:
-            if stack_node.dfa.from_rule == 'small_stmt':
-                nodes = []
-            else:
-                nodes += stack_node.nodes
-
+        nodes = _gather_nodes(stack)
         if nodes and nodes[-1] in ('as', 'def', 'class'):
             # No completions for ``with x as foo`` and ``import x as foo``.
             # Also true for defining names as a class or function.
@@ -279,9 +302,10 @@ class Completion:
             return complete_param_names(context, function_name.value, decorators)
         return []

-    def _complete_keywords(self, allowed_transitions):
+    def _complete_keywords(self, allowed_transitions, only_values):
         for k in allowed_transitions:
             if isinstance(k, str) and k.isalpha():
+                if not only_values or k in ('True', 'False', 'None'):
                    yield keywords.KeywordName(self._inference_state, k)

     def _complete_global_scope(self):
@@ -411,23 +435,54 @@ class Completion:
                 yield name


+def _gather_nodes(stack):
+    nodes = []
+    for stack_node in stack:
+        if stack_node.dfa.from_rule == 'small_stmt':
+            nodes = []
+        else:
+            nodes += stack_node.nodes
+    return nodes
+
+
+_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
+
+
 def _extract_string_while_in_string(leaf, position):
+    def return_part_of_leaf(leaf):
+        kwargs = {}
+        if leaf.line == position[0]:
+            kwargs['endpos'] = position[1] - leaf.column
+        match = _string_start.match(leaf.value, **kwargs)
+        start = match.group(0)
+        if leaf.line == position[0] and position[1] < leaf.column + match.end():
+            return None, None, None
+        return cut_value_at_position(leaf, position)[match.end():], leaf, start
+
     if position < leaf.start_pos:
-        return None, None
+        return None, None, None

     if leaf.type == 'string':
-        match = re.match(r'^\w*(\'{3}|"{3}|\'|")', leaf.value)
-        quote = match.group(1)
-        if leaf.line == position[0] and position[1] < leaf.column + match.end():
-            return None, None
-        if leaf.end_pos[0] == position[0] and position[1] > leaf.end_pos[1] - len(quote):
-            return None, None
-        return cut_value_at_position(leaf, position)[match.end():], leaf
+        return return_part_of_leaf(leaf)

     leaves = []
     while leaf is not None and leaf.line == position[0]:
         if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
-            return ''.join(l.get_code() for l in leaves), leaf
+            if len(leaf.value) > 1:
+                return return_part_of_leaf(leaf)
+            prefix_leaf = None
+            if not leaf.prefix:
+                prefix_leaf = leaf.get_previous_leaf()
+                if prefix_leaf is None or prefix_leaf.type != 'name' \
+                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
+                    prefix_leaf = None
+
+            return (
+                ''.join(cut_value_at_position(l, position) for l in leaves),
+                prefix_leaf or leaf,
+                ('' if prefix_leaf is None else prefix_leaf.value)
+                + cut_value_at_position(leaf, position),
+            )
         leaves.insert(0, leaf)
         leaf = leaf.get_previous_leaf()
-    return None, None
+    return None, None, None
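
The reworked helper now returns a three-tuple of (typed string content, leaf where the string starts, quote including any string prefix) instead of the old two-tuple. A rough illustration, assuming parso is importable and the private helper keeps this module path; the concrete return values are indicative, not verified:

import parso
from jedi.api.completion import _extract_string_while_in_string

module = parso.parse("x = dct['ke")
leaf = module.get_leaf_for_position((1, 11), include_prefixes=True)
string, start_leaf, quote = _extract_string_while_in_string(leaf, (1, 11))
# Expected shape: ('ke', <leaf of the unterminated string>, "'")
print(string, start_leaf, quote)
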
@@ -1,17 +1,20 @@
 import os

 from jedi._compatibility import FileNotFoundError, force_unicode, scandir
 from jedi.inference.names import AbstractArbitraryName
 from jedi.api import classes
+from jedi.api.strings import StringName, get_quote_ending
 from jedi.api.helpers import fuzzy_match, start_match
 from jedi.inference.helpers import get_str_or_none
-from jedi.parser_utils import get_string_quote
+
+
+class PathName(StringName):
+    api_type = u'path'


 def complete_file_name(inference_state, module_context, start_leaf, string,
                        like_name, signatures_callback, code_lines, position, fuzzy):
     # First we want to find out what can actually be changed as a name.
-    like_name_length = len(os.path.basename(string) + like_name)
+    like_name_length = len(os.path.basename(string))

     addition = _get_string_additions(module_context, start_leaf)
     if addition is None:
@@ -20,7 +23,7 @@ def complete_file_name(inference_state, module_context, start_leaf, string,

     # Here we use basename again, because if strings are added like
     # `'foo' + 'bar`, it should complete to `foobar/`.
-    must_start_with = os.path.basename(string) + like_name
+    must_start_with = os.path.basename(string)
     string = os.path.dirname(string)

     sigs = signatures_callback(*position)
@@ -45,22 +48,13 @@ def complete_file_name(inference_state, module_context, start_leaf, string,
             match = start_match(name, must_start_with)
         if match:
             if is_in_os_path_join or not entry.is_dir():
-                if start_leaf.type == 'string':
-                    quote = get_string_quote(start_leaf)
-                else:
-                    assert start_leaf.type == 'error_leaf'
-                    quote = start_leaf.value
-                potential_other_quote = \
-                    code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
-                # Add a quote if it's not already there.
-                if quote != potential_other_quote:
-                    name += quote
+                name += get_quote_ending(start_leaf.value, code_lines, position)
             else:
                 name += os.path.sep

             yield classes.Completion(
                 inference_state,
-                FileName(inference_state, name[len(must_start_with) - like_name_length:]),
+                PathName(inference_state, name[len(must_start_with) - like_name_length:]),
                 stack=None,
                 like_name_length=like_name_length,
                 is_fuzzy=fuzzy,
@@ -106,11 +100,6 @@ def _add_strings(context, nodes, add_slash=False):
     return string


-class FileName(AbstractArbitraryName):
-    api_type = u'path'
-    is_value_name = False
-
-
 def _add_os_path_join(module_context, start_leaf, bracket_start):
     def check(maybe_bracket, nodes):
         if maybe_bracket.start_pos != bracket_start:
jedi/api/strings.py (new file, 110 lines)
@@ -0,0 +1,110 @@
"""
|
||||
This module is here for string completions. This means mostly stuff where
|
||||
strings are returned, like `foo = dict(bar=3); foo["ba` would complete to
|
||||
`"bar"]`.
|
||||
|
||||
It however does the same for numbers. The difference between string completions
|
||||
and other completions is mostly that this module doesn't return defined
|
||||
names in a module, but pretty much an arbitrary string.
|
||||
"""
|
||||
import re
|
||||
|
||||
from jedi._compatibility import unicode
|
||||
from jedi.inference.names import AbstractArbitraryName
|
||||
from jedi.inference.helpers import infer_call_of_leaf
|
||||
from jedi.api.classes import Completion
|
||||
from jedi.parser_utils import cut_value_at_position
|
||||
|
||||
_sentinel = object()
|
||||
|
||||
|
||||
class StringName(AbstractArbitraryName):
|
||||
api_type = u'string'
|
||||
is_value_name = False
|
||||
|
||||
|
||||
def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
|
||||
bracket_leaf = leaf
|
||||
if bracket_leaf != '[':
|
||||
bracket_leaf = leaf.get_previous_leaf()
|
||||
|
||||
cut_end_quote = ''
|
||||
if string:
|
||||
cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True)
|
||||
|
||||
if bracket_leaf == '[':
|
||||
if string is None and leaf is not bracket_leaf:
|
||||
string = cut_value_at_position(leaf, position)
|
||||
|
||||
context = module_context.create_context(bracket_leaf)
|
||||
before_bracket_leaf = bracket_leaf.get_previous_leaf()
|
||||
if before_bracket_leaf.type in ('atom', 'trailer', 'name'):
|
||||
values = infer_call_of_leaf(context, before_bracket_leaf)
|
||||
return list(_completions_for_dicts(
|
||||
module_context.inference_state,
|
||||
values,
|
||||
'' if string is None else string,
|
||||
cut_end_quote,
|
||||
fuzzy=fuzzy,
|
||||
))
|
||||
return []
|
||||
|
||||
|
||||
def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy):
|
||||
for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)):
|
||||
dict_key_str = _create_repr_string(literal_string, dict_key)
|
||||
if dict_key_str.startswith(literal_string):
|
||||
name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None])
|
||||
yield Completion(
|
||||
inference_state,
|
||||
name,
|
||||
stack=None,
|
||||
like_name_length=len(literal_string),
|
||||
is_fuzzy=fuzzy
|
||||
)
|
||||
|
||||
|
||||
def _create_repr_string(literal_string, dict_key):
|
||||
if not isinstance(dict_key, (unicode, bytes)) or not literal_string:
|
||||
return repr(dict_key)
|
||||
|
||||
r = repr(dict_key)
|
||||
prefix, quote = _get_string_prefix_and_quote(literal_string)
|
||||
if quote is None:
|
||||
return r
|
||||
if quote == r[0]:
|
||||
return prefix + r
|
||||
return prefix + quote + r[1:-1] + quote
|
||||
|
||||
|
||||
def _get_python_keys(dicts):
|
||||
for dct in dicts:
|
||||
if dct.array_type == 'dict':
|
||||
for key in dct.get_key_values():
|
||||
dict_key = key.get_safe_value(default=_sentinel)
|
||||
if dict_key is not _sentinel:
|
||||
yield dict_key
|
||||
|
||||
|
||||
def _get_string_prefix_and_quote(string):
|
||||
match = re.match(r'(\w*)("""|\'{3}|"|\')', string)
|
||||
if match is None:
|
||||
return None, None
|
||||
return match.group(1), match.group(2)
|
||||
|
||||
|
||||
def _get_string_quote(string):
|
||||
return _get_string_prefix_and_quote(string)[1]
|
||||
|
||||
|
||||
def _matches_quote_at_position(code_lines, quote, position):
|
||||
string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
|
||||
return string == quote
|
||||
|
||||
|
||||
def get_quote_ending(string, code_lines, position, invert_result=False):
|
||||
quote = _get_string_quote(string)
|
||||
# Add a quote only if it's not already there.
|
||||
if _matches_quote_at_position(code_lines, quote, position) != invert_result:
|
||||
return ''
|
||||
return quote
|
||||
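
Taken together, this new module renders dict keys in the quote style the user already typed and appends the closing quote when it is missing. A short usage sketch modelled on test_dict_completion further down in this commit:

import jedi

strs = {'asdf': 1, 'foo': 2}
# Cursor right after the opening double quote of `strs["`.
comps = jedi.Interpreter('strs["', [locals()]).complete(column=6)
print([c.complete for c in comps])  # ['asdf"', 'foo"']
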
@@ -410,13 +410,30 @@ class DirectObjectAccess(object):
         return [self._create_access(module), access]

     def get_safe_value(self):
-        if type(self._obj) in (bool, bytes, float, int, str, unicode, slice):
+        if type(self._obj) in (bool, bytes, float, int, str, unicode, slice) or self._obj is None:
             return self._obj
         raise ValueError("Object is type %s and not simple" % type(self._obj))

     def get_api_type(self):
         return get_api_type(self._obj)

+    def get_array_type(self):
+        if isinstance(self._obj, dict):
+            return 'dict'
+        return None
+
+    def get_key_paths(self):
+        def iter_partial_keys():
+            # We could use list(keys()), but that might take a lot more memory.
+            for (i, k) in enumerate(self._obj.keys()):
+                # Limit key listing at some point. This is artificial, but this
+                # way we don't get stalled because of slow completions
+                if i > 50:
+                    break
+                yield k
+
+        return [self._create_access_path(k) for k in iter_partial_keys()]
+
     def get_access_path_tuples(self):
         accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]
         return [(access.py__name__(), access) for access in accesses]
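
The cap in get_key_paths keeps completion responsive on huge runtime dicts; `if i > 50: break` yields indices 0..50, i.e. at most 51 keys. A plain-Python paraphrase of the same effect (not jedi API):

from itertools import islice

big = {i: i for i in range(10000)}
partial_keys = list(islice(big.keys(), 51))  # same 51-key limit
print(len(partial_keys))  # 51
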
@@ -281,6 +281,16 @@ class CompiledObject(Value):
             return CompiledModuleContext(self)
         return CompiledContext(self)

+    @property
+    def array_type(self):
+        return self.access_handle.get_array_type()
+
+    def get_key_values(self):
+        return [
+            create_from_access_path(self.inference_state, k)
+            for k in self.access_handle.get_key_paths()
+        ]
+

 class CompiledName(AbstractNameDefinition):
     def __init__(self, inference_state, parent_context, name):
@@ -193,6 +193,9 @@ class DictModification(_Modification):
             yield lazy_context
         yield self._contextualized_key

+    def get_key_values(self):
+        return self._wrapped_value.get_key_values() | self._contextualized_key.infer()
+

 class ListModification(_Modification):
     def py__iter__(self, contextualized_node=None):
@@ -354,6 +354,24 @@ class TreeInstance(_BaseTreeInstance):
     def get_annotated_class_object(self):
         return self._get_annotated_class_object() or self.class_value

+    def get_key_values(self):
+        values = NO_VALUES
+        if self.array_type == 'dict':
+            for i, (key, instance) in enumerate(self._arguments.unpack()):
+                if key is None and i == 0:
+                    values |= ValueSet.from_sets(
+                        v.get_key_values()
+                        for v in instance.infer()
+                        if v.array_type == 'dict'
+                    )
+                if key:
+                    values |= ValueSet([compiled.create_simple_object(
+                        self.inference_state,
+                        key,
+                    )])
+
+        return values
+
     def py__simple_getitem__(self, index):
         if self.array_type == 'dict':
             # Logic for dict({'foo': bar}) and dict(foo=bar)
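
In effect, keys now flow through `dict(...)` calls: the positional dict argument contributes its own keys and every keyword argument adds one more, which is exactly what the updated completion tests below exercise:

some_dct = {'a': 1, 'x': []}
some_other_dct = dict(some_dct, c=set)
# Completing some_other_dct['...'] now offers 'a', 'x' and 'c', and
# some_other_dct['c'] infers to `set`.
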
@@ -246,7 +246,17 @@ class GeneratorComprehension(_BaseComprehension, GeneratorBase):
     pass


-class DictComprehension(ComprehensionMixin, Sequence):
+class _DictKeyMixin(object):
+    # TODO merge with _DictMixin?
+    def get_mapping_item_values(self):
+        return self._dict_keys(), self._dict_values()
+
+    def get_key_values(self):
+        # TODO merge with _dict_keys?
+        return self._dict_keys()
+
+
+class DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin):
     array_type = u'dict'

     def __init__(self, inference_state, defining_context, sync_comp_for_node, key_node, value_node):
@@ -296,9 +306,6 @@ class DictComprehension(ComprehensionMixin, Sequence):

         return ValueSet([FakeList(self.inference_state, lazy_values)])

-    def get_mapping_item_values(self):
-        return self._dict_keys(), self._dict_values()
-
     def exact_key_items(self):
         # NOTE: A smarter thing can probably done here to achieve better
         # completions, but at least like this jedi doesn't crash
@@ -409,7 +416,7 @@ class SequenceLiteralValue(Sequence):
         return "<%s of %s>" % (self.__class__.__name__, self.atom)


-class DictLiteralValue(_DictMixin, SequenceLiteralValue):
+class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):
     array_type = u'dict'

     def __init__(self, inference_state, defining_context, atom):
@@ -474,9 +481,6 @@ class DictLiteralValue(_DictMixin, SequenceLiteralValue):
             for k, v in self.get_tree_entries()
         )

-    def get_mapping_item_values(self):
-        return self._dict_keys(), self._dict_values()
-

 class _FakeSequence(Sequence):
     def __init__(self, inference_state, lazy_value_list):
@@ -512,7 +516,7 @@ class FakeList(_FakeSequence):
     array_type = u'tuple'


-class FakeDict(_DictMixin, Sequence):
+class FakeDict(_DictMixin, Sequence, _DictKeyMixin):
     array_type = u'dict'

     def __init__(self, inference_state, dct):
@@ -556,9 +560,6 @@ class FakeDict(_DictMixin, Sequence):
     def _dict_keys(self):
         return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())

-    def get_mapping_item_values(self):
-        return self._dict_keys(), self._dict_values()
-
     def exact_key_items(self):
         return self._dct.items()
@@ -293,10 +293,6 @@ def cut_value_at_position(leaf, position):
     return ''.join(lines)


-def get_string_quote(leaf):
-    return re.match(r'\w*("""|\'{3}|"|\')', leaf.value).group(1)
-
-
 def _function_is_x_method(method_name):
     def wrapper(function_node):
         """
@@ -209,8 +209,10 @@ if r:

 def a():
     """
-    #? ['global_define']
+    #? []
     global_define
+    #?
+    str
     """
     pass
@@ -384,5 +384,14 @@ some_dct['y'] = tuple
 some_dct['x']
 #? int() str() list tuple
 some_dct['unknown']
+k = 'a'
 #? int()
-some_dct['a']
+some_dct[k]
+
+some_other_dct = dict(some_dct, c=set)
+#? int()
+some_other_dct['a']
+#? list
+some_other_dct['x']
+#? set
+some_other_dct['c']
@@ -175,12 +175,12 @@ current_dirname = os.path.basename(dirname(dirname(dirname(__file__))))
 @pytest.mark.parametrize(
     'file, code, column, expected', [
         # General tests / relative paths
-        (None, '"comp', None, ['ile', 'lex']),  # No files like comp
+        (None, '"comp', None, []),  # No files like comp
         (None, '"test', None, [s]),
         (None, '"test', 4, ['t' + s]),
         ('example.py', '"test%scomp' % s, None, ['letion' + s]),
-        ('example.py', 'r"comp"', None, "A LOT"),
-        ('example.py', 'r"tes"', None, "A LOT"),
+        ('example.py', 'r"comp"', None, []),
+        ('example.py', 'r"tes"', None, []),
         ('example.py', 'r"tes"', 5, ['t' + s]),
         ('example.py', 'r" tes"', 6, []),
         ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
@@ -273,6 +273,85 @@ def test_file_path_completions(Script, file, code, column, expected):
     assert [c.complete for c in comps] == expected


+_dict_keys_completion_tests = [
+    ('ints[', 5, ['1', '50', Ellipsis]),
+    ('ints[]', 5, ['1', '50', Ellipsis]),
+    ('ints[1]', 5, ['1', '50', Ellipsis]),
+    ('ints[1]', 6, ['']),
+    ('ints[1', 5, ['1', '50', Ellipsis]),
+    ('ints[1', 6, ['']),
+
+    ('ints[5]', 5, ['1', '50', Ellipsis]),
+    ('ints[5]', 6, ['0']),
+    ('ints[50', 5, ['1', '50', Ellipsis]),
+    ('ints[5', 6, ['0']),
+    ('ints[50', 6, ['0']),
+    ('ints[50', 7, ['']),
+
+    ('strs[', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]),
+    ('strs[]', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]),
+    ("strs['", 6, ["asdf'", "fbar'", "foo'"]),
+    ("strs[']", 6, ["asdf'", "fbar'", "foo'"]),
+    ('strs["]', 6, ['asdf"', 'fbar"', 'foo"']),
+    ('strs["""]', 6, ['asdf', 'fbar', 'foo']),
+    ('strs["""]', 8, ['asdf"""', 'fbar"""', 'foo"""']),
+    ('strs[b"]', 8, []),
+    ('strs[r"asd', 10, ['f"']),
+    ('strs[r"asd"', 10, ['f']),
+    ('strs[R"asd', 10, ['f"']),
+    ('strs[f"asd', 10, []),
+    ('strs[br"""asd', 13, ['f"""']),
+    ('strs[br"""asd"""', 13, ['f']),
+
+    ('strs["f', 7, ['bar"', 'oo"']),
+    ('strs["f"', 7, ['bar', 'oo']),
+    ('strs["f]', 7, ['bar"', 'oo"']),
+    ('strs["f"]', 7, ['bar', 'oo']),
+
+    ('mixed[', 6, [r"'a\\sdf'", '1', '1.1', "b'foo'", Ellipsis]),
+    ('mixed[1', 7, ['', '.1']),
+    ('mixed[Non', 9, ['e']),
+
+    ('casted["f', 9, ['3"', 'bar"', 'oo"']),
+    ('casted["f"', 9, ['3', 'bar', 'oo']),
+    ('casted["f3', 10, ['"']),
+    ('casted["f3"', 10, ['']),
+    ('casted_mod["f', 13, ['3"', 'bar"', 'oo"', 'ull"', 'uuu"']),
+
+    ('keywords["', None, ['a"']),
+    ('keywords[Non', None, ['e']),
+    ('keywords[Fa', None, ['lse']),
+    ('keywords[Tr', None, ['ue']),
+    ('keywords[str', None, ['', 's']),
+]
+
+
+@pytest.mark.parametrize(
+    'added_code, column, expected', _dict_keys_completion_tests
+)
+def test_dict_keys_completions(Script, added_code, column, expected, skip_pre_python35):
+    code = dedent(r'''
+        ints = {1: ''}
+        ints[50] = 3.0
+        strs = {'asdf': 1, u"""foo""": 2, r'fbar': 3}
+        mixed = {1: 2, 1.10: 4, None: 6, r'a\sdf': 8, b'foo': 9}
+        casted = dict(strs, f3=4, r'\\xyz')
+        casted_mod = dict(casted)
+        casted_mod["fuuu"] = 8
+        casted_mod["full"] = 8
+        keywords = {None: 1, False: 2, "a": 3}
+        ''')
+    line = None
+    comps = Script(code + added_code).complete(line=line, column=column)
+    if Ellipsis in expected:
+        # This means that global completions are part of this, so filter all of
+        # that out.
+        comps = [c for c in comps if not c._name.is_value_name and not c.is_keyword]
+        expected = [e for e in expected if e is not Ellipsis]
+
+    assert [c.complete for c in comps] == expected
+
+
 def test_start_match():
     assert start_match('Condition', 'C')
@@ -286,4 +365,4 @@ def test_fuzzy_match():


 def test_ellipsis_completion(Script):
-    assert Script('...').completions() == []
+    assert Script('...').complete() == []
@@ -575,3 +575,50 @@ def test_param_annotation_completion(class_is_findable):
     code = 'def CallFoo(x: Foo):\n x.ba'
     def_, = jedi.Interpreter(code, [locals()]).complete()
     assert def_.name == 'bar'
+
+
+@pytest.mark.skipif(sys.version_info[0] == 2, reason="Ignore Python 2, because EOL")
+@pytest.mark.parametrize(
+    'code, column, expected', [
+        ('strs[', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]),
+        ('strs[]', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]),
+        ("strs['", 6, ["asdf'", "fbar'", "foo'"]),
+        ("strs[']", 6, ["asdf'", "fbar'", "foo'"]),
+        ('strs["]', 6, ['asdf"', 'fbar"', 'foo"']),
+
+        ('mixed[', 6, [r"'a\\sdf'", '1', '1.1', "b'foo'", Ellipsis]),
+        ('mixed[1', 7, ['', '.1']),
+        ('mixed[Non', 9, ['e']),
+
+        ('implicit[10', None, ['00']),
+    ]
+)
+def test_dict_completion(code, column, expected):
+    strs = {'asdf': 1, u"""foo""": 2, r'fbar': 3}
+    mixed = {1: 2, 1.10: 4, None: 6, r'a\sdf': 8, b'foo': 9}
+
+    namespaces = [locals(), {'implicit': {1000: 3}}]
+    comps = jedi.Interpreter(code, namespaces).complete(column=column)
+    if Ellipsis in expected:
+        # This means that global completions are part of this, so filter all of
+        # that out.
+        comps = [c for c in comps if not c._name.is_value_name and not c.is_keyword]
+        expected = [e for e in expected if e is not Ellipsis]
+
+    assert [c.complete for c in comps] == expected
+
+
+@pytest.mark.skipif(sys.version_info[0] == 2, reason="Ignore Python 2, because EOL")
+@pytest.mark.parametrize(
+    'code, types', [
+        ('dct[1]', ['int']),
+        ('dct["asdf"]', ['float']),
+        ('dct[r"asdf"]', ['float']),
+        ('dct["a"]', ['float', 'int']),
+    ]
+)
+def test_dict_getitem(code, types):
+    dct = {1: 2, "asdf": 1.0}
+
+    comps = jedi.Interpreter(code, [locals()]).infer()
+    assert [c.name for c in comps] == types
@@ -88,7 +88,7 @@ def test_multiple_docstrings(Script):


 def test_completion(Script):
-    assert Script('''
+    assert not Script('''
 class DocstringCompletion():
     #? []
     """ asdfas """''').complete()