diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 188d3e34..e306eb50 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,8 +7,8 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-20.04, windows-2019]
-        python-version: [3.9, 3.8, 3.7, 3.6]
-        environment: ['3.8', '3.9', '3.7', '3.6', 'interpreter']
+        python-version: ["3.10", "3.9", "3.8", "3.7", "3.6"]
+        environment: ['3.8', '3.10', '3.9', '3.7', '3.6', 'interpreter']
     steps:
     - name: Checkout code
       uses: actions/checkout@v2
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 3cc23d43..52a6e792 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -6,7 +6,11 @@ Changelog
 Unreleased
 ++++++++++
 
+0.18.1 (2021-11-17)
++++++++++++++++++++
+
+- Implicit namespaces are now a separate type in ``Name().type``
+- Python 3.10 support
+
 0.18.0 (2020-12-25)
 +++++++++++++++++++
diff --git a/jedi/api/classes.py b/jedi/api/classes.py
index fba9b5ad..ee741c33 100644
--- a/jedi/api/classes.py
+++ b/jedi/api/classes.py
@@ -27,7 +27,7 @@ from jedi.inference.compiled.mixed import MixedName
 from jedi.inference.names import ImportName, SubModuleName
 from jedi.inference.gradual.stub_value import StubModuleValue
 from jedi.inference.gradual.conversion import convert_names, convert_values
-from jedi.inference.base_value import ValueSet
+from jedi.inference.base_value import ValueSet, HasNoContext
 from jedi.api.keywords import KeywordName
 from jedi.api import completion_cache
 from jedi.api.helpers import filter_follow_imports
@@ -37,13 +37,17 @@ def _sort_names_by_start_pos(names):
     return sorted(names, key=lambda s: s.start_pos or (0, 0))
 
 
-def defined_names(inference_state, context):
+def defined_names(inference_state, value):
     """
     List sub-definitions (e.g., methods in class).
 
     :type scope: Scope
     :rtype: list of Name
     """
+    try:
+        context = value.as_context()
+    except HasNoContext:
+        return []
     filter = next(context.get_filters())
     names = [name for name in filter.values()]
     return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
@@ -759,7 +763,7 @@ class Name(BaseName):
         """
         defs = self._name.infer()
         return sorted(
-            unite(defined_names(self._inference_state, d.as_context()) for d in defs),
+            unite(defined_names(self._inference_state, d) for d in defs),
             key=lambda s: s._name.start_pos or (0, 0)
         )
diff --git a/jedi/api/completion.py b/jedi/api/completion.py
index a2ac4d04..342f7506 100644
--- a/jedi/api/completion.py
+++ b/jedi/api/completion.py
@@ -195,7 +195,6 @@ class Completion:
         - In args: */**: no completion
         - In params (also lambda): no completion before =
         """
-        grammar = self._inference_state.grammar
         self.stack = stack = None
         self._position = (
@@ -278,6 +277,10 @@
             )
         elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
             dot = self._module_node.get_leaf_for_position(self._position)
+            if dot.type == "endmarker":
+                # This is a bit of a weird edge case, maybe we can somehow
+                # generalize this.
+                dot = leaf.get_previous_leaf()
             cached_name, n = self._complete_trailer(dot.get_previous_leaf())
             completion_names += n
         elif self._is_parameter_completion():
diff --git a/jedi/api/environment.py b/jedi/api/environment.py
index 3f1f238f..aea96c47 100644
--- a/jedi/api/environment.py
+++ b/jedi/api/environment.py
@@ -17,7 +17,7 @@ import parso
 
 _VersionInfo = namedtuple('VersionInfo', 'major minor micro')
 
-_SUPPORTED_PYTHONS = ['3.9', '3.8', '3.7', '3.6']
+_SUPPORTED_PYTHONS = ['3.10', '3.9', '3.8', '3.7', '3.6']
 _SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
 _CONDA_VAR = 'CONDA_PREFIX'
 _CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
diff --git a/jedi/inference/base_value.py b/jedi/inference/base_value.py
index e51e063c..31b72937 100644
--- a/jedi/inference/base_value.py
+++ b/jedi/inference/base_value.py
@@ -22,6 +22,10 @@ from jedi.cache import memoize_method
 sentinel = object()
 
 
+class HasNoContext(Exception):
+    pass
+
+
 class HelperValueMixin:
     def get_root_context(self):
         value = self
@@ -261,7 +265,7 @@ class Value(HelperValueMixin):
         return self.parent_context.is_stub()
 
     def _as_context(self):
-        raise NotImplementedError('Not all values need to be converted to contexts: %s', self)
+        raise HasNoContext
 
     @property
     def name(self):
diff --git a/jedi/inference/names.py b/jedi/inference/names.py
index f90f9749..f446deb9 100644
--- a/jedi/inference/names.py
+++ b/jedi/inference/names.py
@@ -341,6 +341,12 @@ class TreeNameDefinition(AbstractTreeName):
     def py__doc__(self):
         api_type = self.api_type
         if api_type in ('function', 'class', 'property'):
+            if self.parent_context.get_root_context().is_stub():
+                from jedi.inference.gradual.conversion import convert_names
+                names = convert_names([self], prefer_stub_to_compiled=False)
+                if self not in names:
+                    return _merge_name_docs(names)
+
             # Make sure the names are not TreeNameDefinitions anymore.
             return clean_scope_docstring(self.tree_name.get_definition())
 
@@ -408,6 +414,9 @@
             return 2
         return 0
 
+    def infer_default(self):
+        return NO_VALUES
+
 
 class BaseTreeParamName(ParamNameInterface, AbstractTreeName):
     annotation_node = None
diff --git a/jedi/inference/value/iterable.py b/jedi/inference/value/iterable.py
index 2f970fe8..7cc37173 100644
--- a/jedi/inference/value/iterable.py
+++ b/jedi/inference/value/iterable.py
@@ -342,6 +342,8 @@ class SequenceLiteralValue(Sequence):
         else:
             with reraise_getitem_errors(TypeError, KeyError, IndexError):
                 node = self.get_tree_entries()[index]
+            if node == ':' or node.type == 'subscript':
+                return NO_VALUES
             return self._defining_context.infer_node(node)
 
     def py__iter__(self, contextualized_node=None):
@@ -407,16 +409,6 @@ class SequenceLiteralValue(Sequence):
         else:
             return [array_node]
 
-    def exact_key_items(self):
-        """
-        Returns a generator of tuples like dict.items(), where the key is
-        resolved (as a string) and the values are still lazy values.
-        """
-        for key_node, value in self.get_tree_entries():
-            for key in self._defining_context.infer_node(key_node):
-                if is_string(key):
-                    yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)
-
     def __repr__(self):
         return "<%s of %s>" % (self.__class__.__name__, self.atom)
 
@@ -472,6 +464,16 @@ class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):
 
         return ValueSet([FakeList(self.inference_state, lazy_values)])
 
+    def exact_key_items(self):
+        """
+        Returns a generator of tuples like dict.items(), where the key is
+        resolved (as a string) and the values are still lazy values.
+ """ + for key_node, value in self.get_tree_entries(): + for key in self._defining_context.infer_node(key_node): + if is_string(key): + yield key.get_safe_value(), LazyTreeValue(self._defining_context, value) + def _dict_values(self): return ValueSet.from_sets( self._defining_context.infer_node(v) diff --git a/jedi/plugins/pytest.py b/jedi/plugins/pytest.py index 0e196c72..c78bdb4f 100644 --- a/jedi/plugins/pytest.py +++ b/jedi/plugins/pytest.py @@ -31,7 +31,15 @@ def execute(callback): def infer_anonymous_param(func): def get_returns(value): if value.tree_node.annotation is not None: - return value.execute_with_values() + result = value.execute_with_values() + if any(v.name.get_qualified_names(include_module_names=True) + == ('typing', 'Generator') + for v in result): + return ValueSet.from_sets( + v.py__getattribute__('__next__').execute_annotation() + for v in result + ) + return result # In pytest we need to differentiate between generators and normal # returns. diff --git a/setup.py b/setup.py index 1f660a4d..e20d14c2 100755 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup(name='jedi', install_requires=['parso>=0.8.0,<0.9.0'], extras_require={ 'testing': [ - 'pytest<6.0.0', + 'pytest<7.0.0', # docopt for sith doctests 'docopt', # coloroma for colored debug output @@ -61,6 +61,7 @@ setup(name='jedi', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Text Editors :: Integrated Development Environments (IDE)', 'Topic :: Utilities', diff --git a/test/completion/arrays.py b/test/completion/arrays.py index 59b8f2fe..21437bce 100644 --- a/test/completion/arrays.py +++ b/test/completion/arrays.py @@ -44,6 +44,8 @@ b[int():] #? list() b[:] +#? int() +b[:, :-1] #? 3 b[:] @@ -67,6 +69,20 @@ class _StrangeSlice(): #? slice() _StrangeSlice()[1:2] +for x in b[:]: + #? int() + x + +for x in b[:, :-1]: + #? + x + +class Foo: + def __getitem__(self, item): + return item + +#? +Foo()[:, :-1][0] # ----------------- # iterable multiplication diff --git a/test/completion/docstring.py b/test/completion/docstring.py index 5160610f..b2d02239 100644 --- a/test/completion/docstring.py +++ b/test/completion/docstring.py @@ -284,6 +284,13 @@ def doctest_with_space(): import_issu """ +def doctest_issue_github_1748(): + """From GitHub #1748 + #? 10 [] + This. Al + """ + pass + def docstring_rst_identifiers(): """ diff --git a/test/completion/pytest.py b/test/completion/pytest.py index 3c648c6c..a900dcda 100644 --- a/test/completion/pytest.py +++ b/test/completion/pytest.py @@ -1,3 +1,5 @@ +from typing import Generator + import pytest from pytest import fixture @@ -169,3 +171,15 @@ def test_inheritance_fixture(inheritance_fixture, caplog): @pytest.fixture def caplog(caplog): yield caplog + +# ----------------- +# Generator with annotation +# ----------------- + +@pytest.fixture +def with_annot() -> Generator[float, None, None]: + pass + +def test_with_annot(inheritance_fixture, with_annot): + #? 
+    with_annot
diff --git a/test/test_api/test_completion.py b/test/test_api/test_completion.py
index 8e5ec3b2..de46223e 100644
--- a/test/test_api/test_completion.py
+++ b/test/test_api/test_completion.py
@@ -457,3 +457,7 @@ def test_module_completions(Script, module):
         # Just make sure that there are no errors
         c.type
         c.docstring()
+
+
+def test_whitespace_at_end_after_dot(Script):
+    assert 'strip' in [c.name for c in Script('str. ').complete()]
diff --git a/test/test_api/test_documentation.py b/test/test_api/test_documentation.py
index b86c68fc..4c09d612 100644
--- a/test/test_api/test_documentation.py
+++ b/test/test_api/test_documentation.py
@@ -37,6 +37,17 @@ def test_operator_doc(Script):
     assert len(d.docstring()) > 100
 
 
+@pytest.mark.parametrize(
+    'code, help_part', [
+        ('str', 'Create a new string object'),
+        ('str.strip', 'Return a copy of the string'),
+    ]
+)
+def test_stdlib_doc(Script, code, help_part):
+    h, = Script(code).help()
+    assert help_part in h.docstring(raw=True)
+
+
 def test_lambda(Script):
     d, = Script('lambda x: x').help(column=0)
     assert d.type == 'keyword'
diff --git a/test/test_api/test_interpreter.py b/test/test_api/test_interpreter.py
index e6232e05..131ec6c1 100644
--- a/test/test_api/test_interpreter.py
+++ b/test/test_api/test_interpreter.py
@@ -732,3 +732,10 @@ def test_complete_not_findable_class_source():
 
     assert "ta" in [c.name for c in completions]
     assert "ta1" in [c.name for c in completions]
+
+
+def test_param_infer_default():
+    abs_sig, = jedi.Interpreter('abs(', [{'abs': abs}]).get_signatures()
+    param, = abs_sig.params
+    assert param.name == 'x'
+    assert param.infer_default() == []
diff --git a/test/test_api/test_names.py b/test/test_api/test_names.py
index d43a29bb..287a301e 100644
--- a/test/test_api/test_names.py
+++ b/test/test_api/test_names.py
@@ -189,3 +189,9 @@ def test_no_error(get_names):
 def test_is_side_effect(get_names, code, index, is_side_effect):
     names = get_names(code, references=True, all_scopes=True)
     assert names[index].is_side_effect() == is_side_effect
+
+
+def test_no_defined_names(get_names):
+    definition, = get_names("x = (1, 2)")
+
+    assert not definition.defined_names()
diff --git a/test/test_utils.py b/test/test_utils.py
index c6198401..0dcf80db 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -85,7 +85,7 @@ class TestSetupReadline(unittest.TestCase):
         }
         # There are quite a few differences, because both Windows and Linux
         # (posix and nt) librariesare included.
-        assert len(difference) < 15
+        assert len(difference) < 30
 
     def test_local_import(self):
         s = 'import test.test_utils'
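
A quick way to sanity-check the behaviour introduced above from a plain Python session (illustrative only, not part of the patch): the snippet below simply mirrors the new tests `test_whitespace_at_end_after_dot` and `test_param_infer_default`, and assumes a jedi build containing these changes is importable.

    import jedi

    # Completing after "str. " (dot followed by whitespace) used to hit the
    # endmarker edge case that jedi/api/completion.py now handles.
    assert 'strip' in [c.name for c in jedi.Script('str. ').complete()]

    # ParamNameInterface.infer_default() is new and falls back to NO_VALUES,
    # so a compiled callable's parameter reports an empty default.
    sig, = jedi.Interpreter('abs(', [{'abs': abs}]).get_signatures()
    param, = sig.params
    assert param.name == 'x'
    assert param.infer_default() == []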