mirror of https://github.com/davidhalter/jedi.git
synced 2026-02-03 21:52:43 +08:00

Compare commits (1 commit)

Commit: 354dab9503
.github/workflows/ci.yml (vendored, 31 changed lines)
@@ -1,44 +1,45 @@
 name: ci

-on: [push, pull_request, workflow_dispatch]
+on: push

 jobs:
   tests:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-24.04, windows-2022]
-        python-version: ["3.13", "3.12", "3.11", "3.10", "3.9", "3.8"]
-        environment: ['3.8', '3.13', '3.12', '3.11', '3.10', '3.9', 'interpreter']
+        os: [ubuntu-20.04, windows-2019]
+        python-version: [3.9, 3.8, 3.7, 3.6]
+        environment: ['3.8', '3.9', '3.7', '3.6', 'interpreter']
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
        with:
          submodules: recursive

-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v2
        if: ${{ matrix.environment != 'interpreter' }}
        with:
          python-version: ${{ matrix.environment }}
-          allow-prereleases: true

-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
-          allow-prereleases: true

       - name: Install dependencies
         run: 'pip install .[testing]'

+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3

       - name: Run tests
         run: python -m pytest
         env:
           JEDI_TEST_ENVIRONMENT: ${{ matrix.environment }}

   code-quality:
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-20.04
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
        with:
          submodules: recursive

@@ -47,15 +48,15 @@ jobs:

       - name: Run tests
         run: |
-          python -m flake8 jedi test setup.py
-          python -m mypy jedi sith.py setup.py
+          python -m flake8 jedi setup.py
+          python -m mypy jedi sith.py

   coverage:
-    runs-on: ubuntu-24.04
+    runs-on: ubuntu-20.04

     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
        with:
          submodules: recursive

.gitignore (vendored, 1 changed line)
@@ -14,4 +14,3 @@ record.json
 /.pytest_cache
 /.mypy_cache
 /venv/
-.nvimrc

.readthedocs.yml

@@ -1,21 +1,2 @@
-version: 2
-
 python:
-   install:
-     - method: pip
-       path: .
-       extra_requirements:
-         - docs
+   pip_install: true
-
-submodules:
-  include: all
-
-sphinx:
-  configuration: docs/conf.py
-
-build:
-  os: ubuntu-22.04
-  tools:
-    python: "3.11"
-  apt_packages:
-    - graphviz

AUTHORS.txt

@@ -1,4 +1,4 @@
 Main Authors
 ------------

 - David Halter (@davidhalter) <davidhalter88@gmail.com>
@@ -61,11 +61,6 @@ Code Contributors
 - Vladislav Serebrennikov (@endilll)
 - Andrii Kolomoiets (@muffinmad)
 - Leo Ryu (@Leo-Ryu)
-- Joseph Birkner (@josephbirkner)
-- Márcio Mazza (@marciomazza)
-- Martin Vielsmaier (@moser) <martin@vielsmaier.net>
-- TingJia Wu (@WutingjiaX) <wutingjia@bytedance.com>
-- Nguyễn Hồng Quân <ng.hong.quan@gmail.com>

 And a few more "anonymous" contributors.

CHANGELOG.rst

@@ -6,41 +6,7 @@ Changelog
 Unreleased
 ++++++++++

-0.19.2 (2024-11-10)
-+++++++++++++++++++
-
-- Python 3.13 support
-
-0.19.1 (2023-10-02)
-+++++++++++++++++++
-
-- Python 3.12 support (Thanks Peter!)
-
-0.19.0 (2023-07-29)
-+++++++++++++++++++
-
-- Python 3.11 support
-- Massive improvements in performance for ``Interpreter`` (e.g. IPython) users.
-  This especially affects ``pandas`` users with large datasets.
-- Add ``jedi.settings.allow_unsafe_interpreter_executions`` to make it easier
-  for IPython users to avoid unsafe executions.
-
-0.18.2 (2022-11-21)
-+++++++++++++++++++
-
-- Added dataclass-equivalent for attrs.define
-- Find fixtures from Pytest entrypoints; Examples of pytest plugins installed
-  like this are pytest-django, pytest-sugar and Faker.
-- Fixed Project.search, when a venv was involved, which is why for example
-  `:Pyimport django.db` did not work in some cases in jedi-vim.
-- And many smaller bugfixes
-
-0.18.1 (2021-11-17)
-+++++++++++++++++++
-
 - Implict namespaces are now a separate types in ``Name().type``
-- Python 3.10 support
-- Mostly bugfixes

 0.18.0 (2020-12-25)
 +++++++++++++++++++

README.rst (12 changed lines)
@@ -2,9 +2,6 @@
 Jedi - an awesome autocompletion, static analysis and refactoring library for Python
 ####################################################################################

-**I released the successor to Jedi: A
-Mypy-Compatible Python Language Server Built in Rust** - `Zuban <https://github.com/zubanls/zuban>`_
-
 .. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg
    :target: https://github.com/davidhalter/jedi/issues
    :alt: The percentage of open issues and pull requests
@@ -13,7 +10,7 @@ Mypy-Compatible Python Language Server Built in Rust** - `Zuban <https://github.
    :target: https://github.com/davidhalter/jedi/issues
    :alt: The resolution time is the median time an issue or pull request stays open.

-.. image:: https://github.com/davidhalter/jedi/actions/workflows/ci.yml/badge.svg?branch=master
+.. image:: https://github.com/davidhalter/jedi/workflows/ci/badge.svg?branch=master
    :target: https://github.com/davidhalter/jedi/actions
    :alt: Tests

@@ -45,7 +42,7 @@ Jedi can currently be used with the following editors/projects:
 - `GNOME Builder`_ (with support for GObject Introspection)
 - Gedit (gedi_)
 - wdb_ - Web Debugger
-- `Eric IDE`_
+- `Eric IDE`_ (Available as a plugin)
 - `IPython 6.0.0+ <https://ipython.readthedocs.io/en/stable/whatsnew/version6.html>`_
 - `xonsh shell <https://xon.sh/contents.html>`_ has `jedi extension <https://xon.sh/xontribs.html#jedi>`_

@@ -54,8 +51,7 @@ and many more!
 There are a few language servers that use Jedi:

 - `jedi-language-server <https://github.com/pappasam/jedi-language-server>`_
-- `python-language-server <https://github.com/palantir/python-language-server>`_ (currently unmaintained)
-- `python-lsp-server <https://github.com/python-lsp/python-lsp-server>`_ (fork from python-language-server)
+- `python-language-server <https://github.com/palantir/python-language-server>`_
 - `anakin-language-server <https://github.com/muffinmad/anakin-language-server>`_

 Here are some pictures taken from jedi-vim_:
@@ -102,7 +98,7 @@ Features and Limitations
 Jedi's features are listed here:
 `Features <https://jedi.readthedocs.org/en/latest/docs/features.html>`_.

-You can run Jedi on Python 3.8+ but it should also
+You can run Jedi on Python 3.6+ but it should also
 understand code that is older than those versions. Additionally you should be
 able to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
 very well.

@@ -1,9 +0,0 @@
-# Security Policy
-
-If security issues arise, we will try to fix those as soon as possible.
-
-Due to Jedi's nature, Security Issues will probably be extremely rare, but we will of course treat them seriously.
-
-## Reporting Security Problems
-
-If you need to report a security vulnerability, please send an email to davidhalter88@gmail.com. Typically, I will respond in the next few business days.

conftest.py

@@ -156,14 +156,6 @@ def jedi_path():
     return os.path.dirname(__file__)


-@pytest.fixture()
-def skip_pre_python311(environment):
-    if environment.version_info < (3, 11):
-        # This if is just needed to avoid that tests ever skip way more than
-        # they should for all Python versions.
-        pytest.skip()
-
-
 @pytest.fixture()
 def skip_pre_python38(environment):
     if environment.version_info < (3, 8):

@@ -35,7 +35,7 @@ to write my own version of a completion engine.
 The first idea was to execute non-dangerous code. But I soon realized, that
 this would not work. So I started to build a static analysis tool.
 The biggest problem that I had at the time was that I did not know a thing
-about parsers. I did not even know the word static analysis. It turns
+about parsers.I did not did not even know the word static analysis. It turns
 out they are the foundation of a good static analysis tool. I of course did not
 know that and tried to write my own poor version of a parser that I ended up
 throwing away two years later.
@@ -53,7 +53,7 @@ quick and is pretty much feature complete.

 --------

-I will leave you with a small anecdote that happened in 2012, if I remember
+I will leave you with a small annectote that happend in 2012, if I remember
 correctly. After I explained Guido van Rossum, how some parts of my
 auto-completion work, he said:

@@ -16,7 +16,7 @@ Jedi's main API calls and features are:
 Basic Features
 --------------

-- Python 3.8+ support
+- Python 3.6+ support
 - Ignores syntax errors and wrong indentation
 - Can deal with complex module / function / class structures
 - Great ``virtualenv``/``venv`` support
@@ -57,7 +57,7 @@ Supported Python Features
 Limitations
 -----------

-In general Jedi's limit is quite high, but for very big projects or very
+In general Jedi's limit are quite high, but for very big projects or very
 complex code, sometimes Jedi intentionally stops type inference, to avoid
 hanging for a long time.

@@ -77,7 +77,7 @@ Performance Issues

 Importing ``numpy`` can be quite slow sometimes, as well as loading the
 builtins the first time. If you want to speed things up, you could preload
-libraries in |jedi|, with :func:`.preload_module`. However, once loaded, this
+libriaries in |jedi|, with :func:`.preload_module`. However, once loaded, this
 should not be a problem anymore. The same is true for huge modules like
 ``PySide``, ``wx``, ``tensorflow``, ``pandas``, etc.

@@ -38,7 +38,7 @@ using pip::

 If you want to install the current development version (master branch)::

-    sudo pip install -e git+https://github.com/davidhalter/jedi.git#egg=jedi
+    sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi


 System-wide installation via a package manager

@@ -4,7 +4,7 @@ Using Jedi
 ==========

 |jedi| is can be used with a variety of :ref:`plugins <editor-plugins>`,
-:ref:`language servers <language-servers>` and other software.
+`language servers <language-servers>` and other software.
 It is also possible to use |jedi| in the :ref:`Python shell or with IPython
 <repl-completion>`.

@@ -16,8 +16,7 @@ Language Servers
 --------------

 - `jedi-language-server <https://github.com/pappasam/jedi-language-server>`_
-- `python-language-server <https://github.com/palantir/python-language-server>`_ (currently unmaintained)
-- `python-lsp-server <https://github.com/python-lsp/python-lsp-server>`_ (fork from python-language-server)
+- `python-language-server <https://github.com/palantir/python-language-server>`_
 - `anakin-language-server <https://github.com/muffinmad/anakin-language-server>`_

 .. _editor-plugins:
@@ -87,7 +86,7 @@ Gedit
 Eric IDE
 ~~~~~~~~

-- `Eric IDE`_
+- `Eric IDE`_ (Available as a plugin)

 Web Debugger
 ~~~~~~~~~~~~

jedi/__init__.py

@@ -27,7 +27,7 @@ ad
 load
 """

-__version__ = '0.19.2'
+__version__ = '0.18.0'

 from jedi.api import Script, Interpreter, set_debug_function, preload_module
 from jedi import settings

jedi/_compatibility.py

@@ -5,24 +5,27 @@ different Python versions.
 import errno
 import sys
 import pickle
-from typing import Any


-class Unpickler(pickle.Unpickler):
-    def find_class(self, module: str, name: str) -> Any:
-        # Python 3.13 moved pathlib implementation out of __init__.py as part of
-        # generalising its implementation. Ensure that we support loading
-        # pickles from 3.13 on older version of Python. Since 3.13 maintained a
-        # compatible API, pickles from older Python work natively on the newer
-        # version.
-        if module == 'pathlib._local':
-            module = 'pathlib'
-        return super().find_class(module, name)
+def cast_path(string):
+    """
+    Take a bytes or str path and cast it to unicode.
+
+    Apparently it is perfectly fine to pass both byte and unicode objects into
+    the sys.path. This probably means that byte paths are normal at other
+    places as well.
+
+    Since this just really complicates everything and Python 2.7 will be EOL
+    soon anyway, just go with always strings.
+    """
+    if isinstance(string, bytes):
+        return str(string, encoding='UTF-8', errors='replace')
+    return str(string)


 def pickle_load(file):
     try:
-        return Unpickler(file).load()
+        return pickle.load(file)
     # Python on Windows don't throw EOF errors for pipes. So reraise them with
     # the correct type, which is caught upwards.
     except OSError:

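A note on the custom Unpickler in the hunk above: Python 3.13 moved the pathlib classes into the private module pathlib._local, so a pickle written by 3.13 names that module and fails to load on older interpreters unless find_class remaps it. A minimal, self-contained sketch of the same idea (the compat_loads helper is ours for illustration, not part of jedi):

    import io
    import pickle
    from typing import Any


    class Unpickler(pickle.Unpickler):
        def find_class(self, module: str, name: str) -> Any:
            # Pickles from Python 3.13 reference pathlib._local; the public
            # module name resolves on every version, so remap before lookup.
            if module == 'pathlib._local':
                module = 'pathlib'
            return super().find_class(module, name)


    def compat_loads(data: bytes) -> Any:
        # pickle.loads() offers no hook for a custom Unpickler, so wrap the
        # bytes in a file object and drive the Unpickler directly.
        return Unpickler(io.BytesIO(data)).load()

Because 3.13 kept a compatible API, pickles written by older Pythons load natively on 3.13; the shim is only needed in the other direction.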
jedi/api/__init__.py

@@ -13,6 +13,7 @@ from pathlib import Path
 import parso
 from parso.python import tree

+from jedi._compatibility import cast_path
 from jedi.parser_utils import get_executable_nodes
 from jedi import debug
 from jedi import settings
@@ -99,15 +100,13 @@ class Script:
     """
     def __init__(self, code=None, *, path=None, environment=None, project=None):
         self._orig_path = path
+        # An empty path (also empty string) should always result in no path.
         if isinstance(path, str):
             path = Path(path)

         self.path = path.absolute() if path else None

         if code is None:
-            if path is None:
-                raise ValueError("Must provide at least one of code or path")
-
             # TODO add a better warning than the traceback!
             with open(path, 'rb') as f:
                 code = f.read()
@@ -153,7 +152,7 @@ class Script:
         if self.path is None:
             file_io = None
         else:
-            file_io = KnownContentFileIO(self.path, self._code)
+            file_io = KnownContentFileIO(cast_path(self.path), self._code)
         if self.path is not None and self.path.suffix == '.pyi':
             # We are in a stub file. Try to load the stub properly.
             stub_module = load_proper_stub_module(
@@ -206,7 +205,6 @@ class Script:
         before magic methods and name mangled names that start with ``__``.
         :rtype: list of :class:`.Completion`
         """
-        self._inference_state.reset_recursion_limitations()
         with debug.increase_indent_cm('complete'):
             completion = Completion(
                 self._inference_state, self._get_module_context(), self._code_lines,
@@ -231,7 +229,6 @@ class Script:
         :param prefer_stubs: Prefer stubs to Python objects for this method.
         :rtype: list of :class:`.Name`
         """
-        self._inference_state.reset_recursion_limitations()
         pos = line, column
         leaf = self._module_node.get_name_of_position(pos)
         if leaf is None:
@@ -275,7 +272,6 @@ class Script:
         :param prefer_stubs: Prefer stubs to Python objects for this method.
         :rtype: list of :class:`.Name`
         """
-        self._inference_state.reset_recursion_limitations()
         tree_name = self._module_node.get_name_of_position((line, column))
         if tree_name is None:
             # Without a name we really just want to jump to the result e.g.
@@ -368,17 +364,10 @@ class Script:

         :rtype: list of :class:`.Name`
         """
-        self._inference_state.reset_recursion_limitations()
         definitions = self.goto(line, column, follow_imports=True)
         if definitions:
             return definitions
         leaf = self._module_node.get_leaf_for_position((line, column))

-        if leaf is not None and leaf.end_pos == (line, column) and leaf.type == 'newline':
-            next_ = leaf.get_next_leaf()
-            if next_ is not None and next_.start_pos == leaf.end_pos:
-                leaf = next_
-
         if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
             def need_pydoc():
                 if leaf.value in ('(', ')', '[', ']'):
@@ -410,7 +399,6 @@ class Script:
         the current module only.
         :rtype: list of :class:`.Name`
         """
-        self._inference_state.reset_recursion_limitations()

         def _references(include_builtins=True, scope='project'):
             if scope not in ('project', 'file'):
@@ -445,7 +433,6 @@ class Script:

         :rtype: list of :class:`.Signature`
         """
-        self._inference_state.reset_recursion_limitations()
         pos = line, column
         call_details = helpers.get_signature_details(self._module_node, pos)
         if call_details is None:
@@ -483,7 +470,7 @@ class Script:

         module_context = self._get_module_context()

-        n = leaf.search_ancestor('funcdef', 'classdef')
+        n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
         if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
             # This is a bit of a special case. The context of a function/class
             # name/param/keyword is always it's parent context, not the
@@ -565,7 +552,6 @@ class Script:
         return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)

     def _names(self, all_scopes=False, definitions=True, references=False):
-        self._inference_state.reset_recursion_limitations()
         # Set line/column to a random position, because they don't matter.
         module_context = self._get_module_context()
         defs = [
@@ -594,7 +580,7 @@ class Script:
     @validate_line_column
     def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None):
         """
-        Moves an expression to a new statement.
+        Moves an expression to a new statemenet.

         For example if you have the cursor on ``foo`` and provide a
         ``new_name`` called ``bar``::
@@ -721,8 +707,9 @@ class Interpreter(Script):
     :param namespaces: A list of namespace dictionaries such as the one
         returned by :func:`globals` and :func:`locals`.
     """
+    _allow_descriptor_getattr_default = True

-    def __init__(self, code, namespaces, *, project=None, **kwds):
+    def __init__(self, code, namespaces, **kwds):
         try:
             namespaces = [dict(n) for n in namespaces]
         except Exception:
@@ -735,32 +722,16 @@ class Interpreter(Script):
         if not isinstance(environment, InterpreterEnvironment):
             raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")

-        if project is None:
-            project = Project(Path.cwd())
-
-        super().__init__(code, environment=environment, project=project, **kwds)
-
+        super().__init__(code, environment=environment,
+                         project=Project(Path.cwd()), **kwds)
         self.namespaces = namespaces
-        self._inference_state.allow_unsafe_executions = \
-            settings.allow_unsafe_interpreter_executions
-        # Dynamic params search is important when we work on functions that are
-        # called by other pieces of code. However for interpreter completions
-        # this is not important at all, because the current code is always new
-        # and will never be called by something.
-        # Also sometimes this logic goes a bit too far like in
-        # https://github.com/ipython/ipython/issues/13866, where it takes
-        # seconds to do a simple completion.
-        self._inference_state.do_dynamic_params_search = False
+        self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default

     @cache.memoize_method
     def _get_module_context(self):
-        if self.path is None:
-            file_io = None
-        else:
-            file_io = KnownContentFileIO(self.path, self._code)
         tree_module_value = ModuleValue(
             self._inference_state, self._module_node,
-            file_io=file_io,
+            file_io=KnownContentFileIO(str(self.path), self._code),
             string_names=('__main__',),
             code_lines=self._code_lines,
         )

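For orientation, the Interpreter class changed above is jedi's REPL-facing entry point (IPython builds on it): it completes against live objects in addition to static analysis. A small usage sketch against the public API; the namespace dict and variable names here are ours:

    import jedi

    namespace = {'some_list': [1, 2, 3]}
    # Complete "some_list.ap" the way a REPL would: the live list object in
    # `namespace` is inspected, so list methods are offered.
    interpreter = jedi.Interpreter('some_list.ap', [namespace])
    print([c.name for c in interpreter.complete()])  # ['append']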
jedi/api/classes.py

@@ -17,6 +17,8 @@ import re
 from pathlib import Path
 from typing import Optional

+from parso.tree import search_ancestor
+
 from jedi import settings
 from jedi import debug
 from jedi.inference.utils import unite
@@ -25,7 +27,7 @@ from jedi.inference.compiled.mixed import MixedName
 from jedi.inference.names import ImportName, SubModuleName
 from jedi.inference.gradual.stub_value import StubModuleValue
 from jedi.inference.gradual.conversion import convert_names, convert_values
-from jedi.inference.base_value import ValueSet, HasNoContext
+from jedi.inference.base_value import ValueSet
 from jedi.api.keywords import KeywordName
 from jedi.api import completion_cache
 from jedi.api.helpers import filter_follow_imports
@@ -35,17 +37,13 @@ def _sort_names_by_start_pos(names):
     return sorted(names, key=lambda s: s.start_pos or (0, 0))


-def defined_names(inference_state, value):
+def defined_names(inference_state, context):
     """
     List sub-definitions (e.g., methods in class).

     :type scope: Scope
     :rtype: list of Name
     """
-    try:
-        context = value.as_context()
-    except HasNoContext:
-        return []
     filter = next(context.get_filters())
     names = [name for name in filter.values()]
     return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
@@ -103,7 +101,8 @@ class BaseName:
             # Compiled modules should not return a module path even if they
             # have one.
             path: Optional[Path] = self._get_module_context().py__file__()
-            return path
+            if path is not None:
+                return path

         return None

@@ -507,7 +506,7 @@ class BaseName:
             # - param: The parent_context of a param is not its function but
             #   e.g. the outer class or module.
             cls_or_func_node = self._name.tree_name.get_definition()
-            parent = cls_or_func_node.search_ancestor('funcdef', 'classdef', 'file_input')
+            parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input')
             context = self._get_module_context().create_value(parent).as_context()
         else:
             context = self._name.parent_context
@@ -760,7 +759,7 @@ class Name(BaseName):
         """
         defs = self._name.infer()
         return sorted(
-            unite(defined_names(self._inference_state, d) for d in defs),
+            unite(defined_names(self._inference_state, d.as_context()) for d in defs),
             key=lambda s: s._name.start_pos or (0, 0)
         )

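Several hunks in this file and in completion.py below are one mechanical migration: parso 0.8 deprecated the module-level parso.tree.search_ancestor(node, ...) function in favour of a method on nodes, and the two sides of these hunks sit on opposite ends of that change. A runnable sketch of both spellings (the sample code being parsed is ours):

    import parso
    from parso.tree import search_ancestor

    module = parso.parse('class C:\n    def f(self):\n        x = 1\n')
    leaf = module.get_first_leaf()
    while leaf.value != 'x':  # walk leaves until we reach the name "x"
        leaf = leaf.get_next_leaf()

    # Free-function spelling, deprecated in parso 0.8:
    print(search_ancestor(leaf, 'funcdef', 'classdef').type)  # funcdef

    # Method spelling, the same lookup walking .parent chains:
    print(leaf.search_ancestor('funcdef', 'classdef').type)   # funcdef

Both return the nearest ancestor whose type matches one of the given node types, here the enclosing funcdef.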
jedi/api/completion.py

@@ -4,7 +4,7 @@ from inspect import Parameter

 from parso.python.token import PythonTokenTypes
 from parso.python import tree
-from parso.tree import Leaf
+from parso.tree import search_ancestor, Leaf
 from parso import split_lines

 from jedi import debug
@@ -18,8 +18,7 @@ from jedi.inference import imports
 from jedi.inference.base_value import ValueSet
 from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
 from jedi.inference.context import get_global_filters
-from jedi.inference.value import TreeInstance
-from jedi.inference.docstring_utils import DocstringModule
+from jedi.inference.value import TreeInstance, ModuleValue
 from jedi.inference.names import ParamNameWrapper, SubModuleName
 from jedi.inference.gradual.conversion import convert_values, convert_names
 from jedi.parser_utils import cut_value_at_position
@@ -65,15 +64,12 @@ def _must_be_kwarg(signatures, positional_count, used_kwargs):
     return must_be_kwarg


-def filter_names(inference_state, completion_names, stack, like_name, fuzzy,
-                 imported_names, cached_name):
+def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
     comp_dct = set()
     if settings.case_insensitive_completion:
         like_name = like_name.lower()
     for name in completion_names:
         string = name.string_name
-        if string in imported_names and string != like_name:
-            continue
         if settings.case_insensitive_completion:
             string = string.lower()
         if helpers.match(string, like_name, fuzzy=fuzzy):
@@ -141,11 +137,6 @@ class Completion:

         self._fuzzy = fuzzy

-    # Return list of completions in this order:
-    # - Beginning with what user is typing
-    # - Public (alphabet)
-    # - Private ("_xxx")
-    # - Dunder ("__xxx")
     def complete(self):
         leaf = self._module_node.get_leaf_for_position(
             self._original_position,
@@ -177,19 +168,14 @@ class Completion:

         cached_name, completion_names = self._complete_python(leaf)

-        imported_names = []
-        if leaf.parent is not None and leaf.parent.type in ['import_as_names', 'dotted_as_names']:
-            imported_names.extend(extract_imported_names(leaf.parent))
-
         completions = list(filter_names(self._inference_state, completion_names,
                                         self.stack, self._like_name,
-                                        self._fuzzy, imported_names, cached_name=cached_name))
+                                        self._fuzzy, cached_name=cached_name))

         return (
             # Removing duplicates mostly to remove False/True/None duplicates.
             _remove_duplicates(prefixed_completions, completions)
-            + sorted(completions, key=lambda x: (not x.name.startswith(self._like_name),
-                                                 x.name.startswith('__'),
+            + sorted(completions, key=lambda x: (x.name.startswith('__'),
                                                  x.name.startswith('_'),
                                                  x.name.lower()))
         )
@@ -208,6 +194,7 @@ class Completion:
         - In args: */**: no completion
         - In params (also lambda): no completion before =
         """
+
         grammar = self._inference_state.grammar
         self.stack = stack = None
         self._position = (
@@ -244,8 +231,8 @@ class Completion:
         if previous_leaf is not None:
             stmt = previous_leaf
             while True:
-                stmt = stmt.search_ancestor(
-                    'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
+                stmt = search_ancestor(
+                    stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
                     'error_node',
                 )
                 if stmt is None:
@@ -290,10 +277,6 @@ class Completion:
             )
         elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
             dot = self._module_node.get_leaf_for_position(self._position)
-            if dot.type == "endmarker":
-                # This is a bit of a weird edge case, maybe we can somehow
-                # generalize this.
-                dot = leaf.get_previous_leaf()
             cached_name, n = self._complete_trailer(dot.get_previous_leaf())
             completion_names += n
         elif self._is_parameter_completion():
@@ -356,7 +339,7 @@ class Completion:
         stack_node = self.stack[-3]
         if stack_node.nonterminal == 'funcdef':
             context = get_user_context(self._module_context, self._position)
-            node = leaf.search_ancestor('error_node', 'funcdef')
+            node = search_ancestor(leaf, 'error_node', 'funcdef')
             if node is not None:
                 if node.type == 'error_node':
                     n = node.children[0]
@@ -426,7 +409,7 @@ class Completion:
         Autocomplete inherited methods when overriding in child class.
         """
         leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
-        cls = leaf.search_ancestor('classdef')
+        cls = tree.search_ancestor(leaf, 'classdef')
         if cls is None:
             return

@@ -455,7 +438,6 @@ class Completion:
         - Having some doctest code that starts with `>>>`
         - Having backticks that doesn't have whitespace inside it
         """
-
         def iter_relevant_lines(lines):
             include_next_line = False
             for l in code_lines:
@@ -480,12 +462,12 @@ class Completion:

     def _complete_code_lines(self, code_lines):
         module_node = self._inference_state.grammar.parse(''.join(code_lines))
-        module_value = DocstringModule(
-            in_module_context=self._module_context,
-            inference_state=self._inference_state,
-            module_node=module_node,
+        module_value = ModuleValue(
+            self._inference_state,
+            module_node,
             code_lines=code_lines,
         )
+        module_value.parent_context = self._module_context
         return Completion(
             self._inference_state,
             module_value.as_context(),
@@ -678,19 +660,3 @@ def search_in_module(inference_state, module_context, names, wanted_names,
             def_ = classes.Name(inference_state, n2)
             if not wanted_type or wanted_type == def_.type:
                 yield def_
-
-
-def extract_imported_names(node):
-    imported_names = []
-
-    if node.type in ['import_as_names', 'dotted_as_names', 'dotted_as_name', 'import_as_name']:
-        for index, child in enumerate(node.children):
-            if child.type == 'name':
-                if (index > 1 and node.children[index - 1].type == "keyword"
-                        and node.children[index - 1].value == "as"):
-                    continue
-                imported_names.append(child.value)
-            elif child.type in ('import_as_name', 'dotted_as_name'):
-                imported_names.extend(extract_imported_names(child))
-
-    return imported_names

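The change to the sorted() call above is easy to miss: the newer codebase adds not x.name.startswith(self._like_name) as the leading tuple element, so completions that literally start with what the user typed sort ahead of the public/private/dunder buckets. That matters because fuzzy and case-insensitive matches need not share the typed prefix. A standalone illustration of the key, with plain strings in place of completion objects and like_name as our stand-in:

    like_name = 'fo'
    names = ['__format__', '_foo', 'bar', 'foo', 'form']

    ordered = sorted(names, key=lambda n: (not n.startswith(like_name),
                                           n.startswith('__'),
                                           n.startswith('_'),
                                           n.lower()))
    print(ordered)  # ['foo', 'form', 'bar', '_foo', '__format__']

Without the leading element, 'bar' would sort before 'foo' and 'form' even though the user typed "fo".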
jedi/api/environment.py

@@ -8,7 +8,6 @@ import hashlib
 import filecmp
 from collections import namedtuple
 from shutil import which
-from typing import TYPE_CHECKING

 from jedi.cache import memoize_method, time_cache
 from jedi.inference.compiled.subprocess import CompiledSubprocess, \
@@ -16,13 +15,9 @@ from jedi.inference.compiled.subprocess import CompiledSubprocess, \

 import parso

-if TYPE_CHECKING:
-    from jedi.inference import InferenceState
-
-_VersionInfo = namedtuple('VersionInfo', 'major minor micro')  # type: ignore[name-match]
+_VersionInfo = namedtuple('VersionInfo', 'major minor micro')

-_SUPPORTED_PYTHONS = ['3.13', '3.12', '3.11', '3.10', '3.9', '3.8']
+_SUPPORTED_PYTHONS = ['3.9', '3.8', '3.7', '3.6']
 _SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
 _CONDA_VAR = 'CONDA_PREFIX'
 _CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
@@ -107,10 +102,7 @@ class Environment(_BaseEnvironment):
         version = '.'.join(str(i) for i in self.version_info)
         return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)

-    def get_inference_state_subprocess(
-        self,
-        inference_state: 'InferenceState',
-    ) -> InferenceStateSubprocess:
+    def get_inference_state_subprocess(self, inference_state):
         return InferenceStateSubprocess(inference_state, self._get_subprocess())

     @memoize_method
@@ -142,10 +134,7 @@ class SameEnvironment(_SameEnvironmentMixin, Environment):


 class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
-    def get_inference_state_subprocess(
-        self,
-        inference_state: 'InferenceState',
-    ) -> InferenceStateSameProcess:
+    def get_inference_state_subprocess(self, inference_state):
         return InferenceStateSameProcess(inference_state)

     def get_sys_path(self):
@@ -384,13 +373,10 @@ def _get_executable_path(path, safe=True):
     """

     if os.name == 'nt':
-        pythons = [os.path.join(path, 'Scripts', 'python.exe'), os.path.join(path, 'python.exe')]
+        python = os.path.join(path, 'Scripts', 'python.exe')
     else:
-        pythons = [os.path.join(path, 'bin', 'python')]
-    for python in pythons:
-        if os.path.exists(python):
-            break
-    else:
+        python = os.path.join(path, 'bin', 'python')
+    if not os.path.exists(python):
         raise InvalidPythonEnvironment("%s seems to be missing." % python)

     _assert_safe(python, safe)
@@ -398,7 +384,8 @@ def _get_executable_path(path, safe=True):


 def _get_executables_from_windows_registry(version):
-    import winreg
+    # https://github.com/python/typeshed/pull/3794 adds winreg
+    import winreg  # type: ignore[import]

     # TODO: support Python Anaconda.
     sub_keys = [

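The _get_executable_path change above is worth a note: on Windows an environment's python.exe can sit either in Scripts/ or at the environment root (conda-style), so the newer side probes a list of candidates and relies on Python's for/else clause to raise only when no candidate exists. The same pattern in isolation; find_python is our illustrative name, and a plain FileNotFoundError stands in for jedi's InvalidPythonEnvironment:

    import os


    def find_python(path):
        if os.name == 'nt':
            candidates = [os.path.join(path, 'Scripts', 'python.exe'),
                          os.path.join(path, 'python.exe')]
        else:
            candidates = [os.path.join(path, 'bin', 'python')]
        for python in candidates:
            if os.path.exists(python):
                break  # `python` stays bound to the existing candidate
        else:
            # Reached only if the loop never hit `break`, i.e. nothing exists.
            raise FileNotFoundError('%s seems to be missing.' % python)
        return python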
jedi/api/exceptions.py

@@ -23,7 +23,7 @@ class RefactoringError(_JediError):
     Refactorings can fail for various reasons. So if you work with refactorings
     like :meth:`.Script.rename`, :meth:`.Script.inline`,
     :meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make
-    sure to catch these. The descriptions in the errors are usually valuable
+    sure to catch these. The descriptions in the errors are ususally valuable
     for end users.

     A typical ``RefactoringError`` would tell the user that inlining is not

jedi/api/helpers.py

@@ -205,6 +205,7 @@ def filter_follow_imports(names, follow_builtin_imports=False):

 class CallDetails:
     def __init__(self, bracket_leaf, children, position):
+        ['bracket_leaf', 'call_index', 'keyword_name_str']
         self.bracket_leaf = bracket_leaf
         self._children = children
         self._position = position
@@ -280,7 +281,7 @@ class CallDetails:
     def count_positional_arguments(self):
         count = 0
         for star_count, key_start, had_equal in self._list_arguments()[:-1]:
-            if star_count or key_start:
+            if star_count:
                 break
             count += 1
         return count
@@ -306,7 +307,7 @@ def _iter_arguments(nodes, position):
             first = node.children[0]
             second = node.children[1]
             if second == '=':
-                if second.start_pos < position and first.type == 'name':
+                if second.start_pos < position:
                     yield 0, first.value, True
                 else:
                     yield 0, remove_after_pos(first), False

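The small-looking change in count_positional_arguments above (if star_count or key_start:) means the count now stops at the first keyword argument, not only at */** unpacking. A toy version of the loop; the tuples mimic what the diff shows _list_arguments() yielding, (star_count, key_start, had_equal) per argument:

    # Arguments of a call like f(1, 2, key=3, <cursor>)
    args = [(0, None, False),   # 1
            (0, None, False),   # 2
            (0, 'key', True),   # key=3
            (0, None, False)]   # the in-progress argument at the cursor

    count = 0
    for star_count, key_start, had_equal in args[:-1]:
        if star_count or key_start:  # stop at */**, and now also at key=
            break
        count += 1
    print(count)  # 2

With the older test (if star_count: alone) the loop would count the keyword argument too and report 3 positionals.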
jedi/api/keywords.py

@@ -5,7 +5,8 @@ from typing import Dict, Optional
 from jedi.inference.names import AbstractArbitraryName

 try:
-    from pydoc_data import topics
+    # https://github.com/python/typeshed/pull/4351 adds pydoc_data
+    from pydoc_data import topics  # type: ignore[import]
     pydoc_topics: Optional[Dict[str, str]] = topics.topics
 except ImportError:
     # Python 3.6.8 embeddable does not have pydoc_data.

jedi/api/project.py

@@ -106,16 +106,7 @@ class Project:
         with open(self._get_json_path(self._path), 'w') as f:
             return json.dump((_SERIALIZER_VERSION, data), f)

-    def __init__(
-        self,
-        path,
-        *,
-        environment_path=None,
-        load_unsafe_extensions=False,
-        sys_path=None,
-        added_sys_path=(),
-        smart_sys_path=True,
-    ) -> None:
+    def __init__(self, path, **kwargs):
         """
         :param path: The base path for this project.
         :param environment_path: The Python executable path, typically the path
@@ -134,22 +125,25 @@ class Project:
            local directories. Otherwise you will have to rely on your packages
            being properly configured on the ``sys.path``.
         """
-        if isinstance(path, str):
-            path = Path(path).absolute()
-        self._path = path
-
-        self._environment_path = environment_path
-        if sys_path is not None:
-            # Remap potential pathlib.Path entries
-            sys_path = list(map(str, sys_path))
-        self._sys_path = sys_path
-        self._smart_sys_path = smart_sys_path
-        self._load_unsafe_extensions = load_unsafe_extensions
-        self._django = False
-        # Remap potential pathlib.Path entries
-        self.added_sys_path = list(map(str, added_sys_path))
-        """The sys path that is going to be added at the end of the """
+        def py2_comp(path, environment_path=None, load_unsafe_extensions=False,
+                     sys_path=None, added_sys_path=(), smart_sys_path=True):
+            if isinstance(path, str):
+                path = Path(path).absolute()
+            self._path = path
+
+            self._environment_path = environment_path
+            if sys_path is not None:
+                # Remap potential pathlib.Path entries
+                sys_path = list(map(str, sys_path))
+            self._sys_path = sys_path
+            self._smart_sys_path = smart_sys_path
+            self._load_unsafe_extensions = load_unsafe_extensions
+            self._django = False
+            self.added_sys_path = list(map(str, added_sys_path))
+            """The sys path that is going to be added at the end of the """
+
+        py2_comp(path, **kwargs)

     @property
     def path(self):
@@ -334,8 +328,7 @@ class Project:
         )

         # 2. Search for identifiers in the project.
-        for module_context in search_in_file_ios(inference_state, file_ios,
-                                                 name, complete=complete):
+        for module_context in search_in_file_ios(inference_state, file_ios, name):
             names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
             names = [module_context.create_name(n) for n in names]
             names = _remove_imports(names)
@@ -352,8 +345,9 @@ class Project:
         # 3. Search for modules on sys.path
         sys_path = [
             p for p in self._get_sys_path(inference_state)
-            # Exclude the current folder which is handled by recursing the folders.
-            if p != self._path
+            # Exclude folders that are handled by recursing of the Python
+            # folders.
+            if not p.startswith(str(self._path))
         ]
         names = list(iter_module_names(inference_state, empty_module_context, sys_path))
         yield from search_in_module(
@@ -432,6 +426,7 @@ def get_default_project(path=None):
                 probable_path = dir

     if probable_path is not None:
+        # TODO search for setup.py etc
         return Project(probable_path)

     if first_no_init_file is not None:

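The predicate change in step 3 of Project.search above is subtle: one side excludes every sys.path entry whose string merely starts with the project path, while the other keeps everything except the project root itself. A prefix test can misfire on sibling directories that share a name prefix, which this illustration (paths are ours) makes concrete:

    from pathlib import Path

    project = Path('/repo')
    sys_path = ['/repo', '/repo/src', '/repo-archive', '/usr/lib/python3']

    # Exact-match exclusion: drop only the project root itself.
    print([p for p in sys_path if Path(p) != project])
    # -> ['/repo/src', '/repo-archive', '/usr/lib/python3']

    # String-prefix exclusion: also drops the unrelated sibling
    # '/repo-archive', because '/repo-archive'.startswith('/repo') is True.
    print([p for p in sys_path if not p.startswith(str(project))])
    # -> ['/usr/lib/python3']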
@@ -5,7 +5,6 @@ from typing import Dict, Iterable, Tuple
 from parso import split_lines

 from jedi.api.exceptions import RefactoringError
-from jedi.inference.value.namespace import ImplicitNSName

 EXPRESSION_PARTS = (
     'or_test and_test not_test comparison '

@@ -43,17 +42,11 @@ class ChangedFile:
         if self._from_path is None:
             from_p = ''
         else:
-            try:
-                from_p = self._from_path.relative_to(project_path)
-            except ValueError:  # Happens if the path is not in the project_path
-                from_p = self._from_path
+            from_p = self._from_path.relative_to(project_path)
         if self._to_path is None:
             to_p = ''
         else:
-            try:
-                to_p = self._to_path.relative_to(project_path)
-            except ValueError:
-                to_p = self._to_path
+            to_p = self._to_path.relative_to(project_path)
         diff = difflib.unified_diff(
             old_lines, new_lines,
             fromfile=str(from_p),

@@ -103,12 +96,7 @@ class Refactoring:
                 to_path=calculate_to_path(path),
                 module_node=next(iter(map_)).get_root_node(),
                 node_to_str_map=map_
-            )
-            # We need to use `or`, because the path can be None
-            for path, map_ in sorted(
-                self._file_to_node_changes.items(),
-                key=lambda x: x[0] or Path("")
-            )
+            ) for path, map_ in sorted(self._file_to_node_changes.items())
         }

     def get_renames(self) -> Iterable[Tuple[Path, Path]]:

@@ -122,7 +110,7 @@ class Refactoring:
         project_path = self._inference_state.project.path
         for from_, to in self.get_renames():
             text += 'rename from %s\nrename to %s\n' \
-                % (_try_relative_to(from_, project_path), _try_relative_to(to, project_path))
+                % (from_.relative_to(project_path), to.relative_to(project_path))

         return text + ''.join(f.get_diff() for f in self.get_changed_files().values())

@@ -152,16 +140,13 @@ def rename(inference_state, definitions, new_name):
         raise RefactoringError("There is no name under the cursor")

     for d in definitions:
-        # This private access is ok in a way. It's not public to
-        # protect Jedi users from seeing it.
         tree_name = d._name.tree_name
-        if d.type == 'module' and tree_name is None and d.module_path is not None:
-            p = Path(d.module_path)
+        if d.type == 'module' and tree_name is None:
+            p = None if d.module_path is None else Path(d.module_path)
             file_renames.add(_calculate_rename(p, new_name))
-        elif isinstance(d._name, ImplicitNSName):
-            for p in d._name._value.py__path__():
-                file_renames.add(_calculate_rename(Path(p), new_name))
         else:
+            # This private access is ok in a way. It's not public to
+            # protect Jedi users from seeing it.
             if tree_name is not None:
                 fmap = file_tree_name_map.setdefault(d.module_path, {})
                 fmap[tree_name] = tree_name.prefix + new_name

@@ -255,10 +240,3 @@ def _remove_indent_of_prefix(prefix):
     Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
     """
     return ''.join(split_lines(prefix, keepends=True)[:-1])
-
-
-def _try_relative_to(path: Path, base: Path) -> Path:
-    try:
-        return path.relative_to(base)
-    except ValueError:
-        return path
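The `_try_relative_to` helper removed on the right-hand side exists because `Path.relative_to` raises `ValueError` for paths outside the base. A quick, self-contained illustration (paths invented):

    from pathlib import Path

    def _try_relative_to(path: Path, base: Path) -> Path:
        try:
            return path.relative_to(base)
        except ValueError:  # path does not live under base
            return path

    print(_try_relative_to(Path('/repo/src/a.py'), Path('/repo')))  # src/a.py
    print(_try_relative_to(Path('/tmp/b.py'), Path('/repo')))       # /tmp/b.py, unchanged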
@@ -36,11 +36,8 @@ def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
         string = cut_value_at_position(leaf, position)

     context = module_context.create_context(bracket_leaf)
-
-    before_node = before_bracket_leaf = bracket_leaf.get_previous_leaf()
-    if before_node in (')', ']', '}'):
-        before_node = before_node.parent
-    if before_node.type in ('atom', 'trailer', 'name'):
+    before_bracket_leaf = bracket_leaf.get_previous_leaf()
+    if before_bracket_leaf.type in ('atom', 'trailer', 'name'):
         values = infer_call_of_leaf(context, before_bracket_leaf)
         return list(_completions_for_dicts(
             module_context.inference_state,
@@ -28,7 +28,7 @@ def clear_time_caches(delete_all: bool = False) -> None:
     :param delete_all: Deletes also the cache that is normally not deleted,
         like parser cache, which is important for faster parsing.
     """
-    global _time_caches  # noqa: F824
+    global _time_caches

     if delete_all:
         for cache in _time_caches.values():
@@ -21,7 +21,7 @@ try:
         raise ImportError
     else:
         # Use colorama for nicer console output.
-        from colorama import Fore, init  # type: ignore[import, unused-ignore]
+        from colorama import Fore, init  # type: ignore[import]
         from colorama import initialise

         def _lazy_colorama_init():  # noqa: F811

@@ -106,7 +106,10 @@ def dbg(message, *args, color='GREEN'):
         debug_function(color, i + 'dbg: ' + message % tuple(repr(a) for a in args))


-def warning(message, *args, format=True):
+def warning(message, *args, **kwargs):
+    format = kwargs.pop('format', True)
+    assert not kwargs
+
     if debug_function and enable_warning:
         i = ' ' * _debug_indent
         if format:
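The `warning()` change above swaps a Python 2-era `**kwargs` pop for a keyword-only parameter, letting the interpreter reject unknown keywords instead of the manual `assert`. Side by side, as a sketch:

    # Old style: accept anything, validate by hand.
    def warning_old(message, *args, **kwargs):
        format = kwargs.pop('format', True)
        assert not kwargs  # anything left over is a typo

    # New style: the signature itself enforces it.
    def warning_new(message, *args, format=True):
        pass

    warning_new('oops: %s', 'detail', format=False)  # ok
    # warning_new('oops', fmt=False)  -> TypeError: unexpected keyword argument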
@@ -90,7 +90,7 @@ class InferenceState:
         self.compiled_subprocess = environment.get_inference_state_subprocess(self)
         self.grammar = environment.get_grammar()

-        self.latest_grammar = parso.load_grammar(version='3.13')
+        self.latest_grammar = parso.load_grammar(version='3.7')
         self.memoize_cache = {}  # for memoize decorators
         self.module_cache = imports.ModuleCache()  # does the job of `sys.modules`.
         self.stub_module_cache = {}  # Dict[Tuple[str, ...], Optional[ModuleValue]]

@@ -99,11 +99,10 @@ class InferenceState:
         self.mixed_cache = {}  # see `inference.compiled.mixed._create()`
         self.analysis = []
         self.dynamic_params_depth = 0
-        self.do_dynamic_params_search = settings.dynamic_params
         self.is_analysis = False
         self.project = project
         self.access_cache = {}
-        self.allow_unsafe_executions = False
+        self.allow_descriptor_getattr = False
         self.flow_analysis_enabled = True

         self.reset_recursion_limitations()

@@ -122,14 +121,14 @@ class InferenceState:
         return value_set

     # mypy doesn't support decorated properties (https://github.com/python/mypy/issues/1362)
-    @property
+    @property  # type: ignore[misc]
     @inference_state_function_cache()
     def builtins_module(self):
         module_name = 'builtins'
-        builtins_module, = self.import_module((module_name,), sys_path=[])
+        builtins_module, = self.import_module((module_name,), sys_path=())
         return builtins_module

-    @property
+    @property  # type: ignore[misc]
     @inference_state_function_cache()
     def typing_module(self):
         typing_module, = self.import_module(('typing',))

@@ -182,6 +181,8 @@ class InferenceState:

     def parse_and_get_code(self, code=None, path=None,
                            use_latest_grammar=False, file_io=None, **kwargs):
+        if path is not None:
+            path = str(path)
         if code is None:
             if file_io is None:
                 file_io = FileIO(path)
@@ -22,10 +22,6 @@ from jedi.cache import memoize_method
 sentinel = object()


-class HasNoContext(Exception):
-    pass
-
-
 class HelperValueMixin:
     def get_root_context(self):
         value = self

@@ -265,7 +261,7 @@ class Value(HelperValueMixin):
         return self.parent_context.is_stub()

     def _as_context(self):
-        raise HasNoContext
+        raise NotImplementedError('Not all values need to be converted to contexts: %s', self)

     @property
     def name(self):
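Master replaces the generic `NotImplementedError` in `_as_context` with a dedicated `HasNoContext` exception, so callers can catch exactly the no-context case without masking real bugs. The pattern, reduced to a sketch (the caller name is invented):

    class HasNoContext(Exception):
        pass

    class Value:
        def _as_context(self):
            raise HasNoContext

    def as_context_or_none(value):
        try:
            return value._as_context()
        except HasNoContext:  # precise: won't swallow unrelated errors
            return None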
@@ -297,7 +293,7 @@ class Value(HelperValueMixin):
         just the `_T` generic parameter.

         `value_set`: represents the actual argument passed to the parameter
-        we're inferred for, or (for recursive calls) their types. In the
+        we're inferrined for, or (for recursive calls) their types. In the
         above example this would first be the representation of the list
         `[1]` and then, when recursing, just of `1`.
         """
@@ -8,8 +8,6 @@ import warnings
 import re
 import builtins
 import typing
-from pathlib import Path
-from typing import Optional, Tuple

 from jedi.inference.compiled.getattr_static import getattr_static

@@ -40,7 +38,7 @@ NOT_CLASS_TYPES = (
 MethodDescriptorType = type(str.replace)
 WrapperDescriptorType = type(set.__iter__)
 # `object.__subclasshook__` is an already executed descriptor.
-object_class_dict = type.__dict__["__dict__"].__get__(object)  # type: ignore[index]
+object_class_dict = type.__dict__["__dict__"].__get__(object)
 ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])

 _sentinel = object()

@@ -147,7 +145,7 @@ class AccessPath:
         self.accesses = accesses


-def create_access_path(inference_state, obj) -> AccessPath:
+def create_access_path(inference_state, obj):
     access = create_access(inference_state, obj)
     return AccessPath(access.get_access_path_tuples())

@@ -175,16 +173,16 @@ class DirectObjectAccess:
     def _create_access(self, obj):
         return create_access(self._inference_state, obj)

-    def _create_access_path(self, obj) -> AccessPath:
+    def _create_access_path(self, obj):
         return create_access_path(self._inference_state, obj)

     def py__bool__(self):
         return bool(self._obj)

-    def py__file__(self) -> Optional[Path]:
+    def py__file__(self):
         try:
-            return Path(self._obj.__file__)
-        except (AttributeError, TypeError):
+            return self._obj.__file__
+        except AttributeError:
             return None

     def py__doc__(self):

@@ -213,39 +211,18 @@ class DirectObjectAccess:
     def py__getitem__all_values(self):
         if isinstance(self._obj, dict):
             return [self._create_access_path(v) for v in self._obj.values()]
-        if isinstance(self._obj, (list, tuple)):
-            return [self._create_access_path(v) for v in self._obj]
-
-        if self.is_instance():
-            cls = DirectObjectAccess(self._inference_state, self._obj.__class__)
-            return cls.py__getitem__all_values()
-
-        try:
-            getitem = self._obj.__getitem__
-        except AttributeError:
-            pass
-        else:
-            annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()
-            if annotation is not None:
-                return [annotation]
-        return None
-
-    def py__simple_getitem__(self, index, *, safe=True):
-        if safe and type(self._obj) not in ALLOWED_GETITEM_TYPES:
+        return self.py__iter__list()
+
+    def py__simple_getitem__(self, index):
+        if type(self._obj) not in ALLOWED_GETITEM_TYPES:
             # Get rid of side effects, we won't call custom `__getitem__`s.
             return None

         return self._create_access_path(self._obj[index])

     def py__iter__list(self):
-        try:
-            iter_method = self._obj.__iter__
-        except AttributeError:
-            return None
-        else:
-            p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()
-            if p is not None:
-                return [p]
-
+        if not hasattr(self._obj, '__getitem__'):
+            return None
         if type(self._obj) not in ALLOWED_GETITEM_TYPES:
             # Get rid of side effects, we won't call custom `__getitem__`s.
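The rewritten access code above avoids calling arbitrary `__getitem__`/`__iter__` implementations; it only reads the method's return annotation, which never executes user code. The core idea, independent of jedi's wrappers:

    def return_annotation(func):
        try:
            return func.__annotations__.get('return')
        except AttributeError:
            return None

    class Squares:
        def __getitem__(self, index) -> int:
            return index * index  # never called during completion

    print(return_annotation(Squares().__getitem__))  # <class 'int'>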
@@ -329,37 +306,33 @@ class DirectObjectAccess:
         except TypeError:
             return False

-    def is_allowed_getattr(self, name, safe=True) -> Tuple[bool, bool, Optional[AccessPath]]:
+    def is_allowed_getattr(self, name, unsafe=False):
         # TODO this API is ugly.
+        if unsafe:
+            # Unsafe is mostly used to check for __getattr__/__getattribute__.
+            # getattr_static works for properties, but the underscore methods
+            # are just ignored (because it's safer and avoids more code
+            # execution). See also GH #1378.
+
+            # Avoid warnings, see comment in the next function.
+            with warnings.catch_warnings(record=True):
+                warnings.simplefilter("always")
+                try:
+                    return hasattr(self._obj, name), False
+                except Exception:
+                    # Obviously has an attribute (propably a property) that
+                    # gets executed, so just avoid all exceptions here.
+                    return False, False
         try:
             attr, is_get_descriptor = getattr_static(self._obj, name)
         except AttributeError:
-            if not safe:
-                # Unsafe is mostly used to check for __getattr__/__getattribute__.
-                # getattr_static works for properties, but the underscore methods
-                # are just ignored (because it's safer and avoids more code
-                # execution). See also GH #1378.
-
-                # Avoid warnings, see comment in the next function.
-                with warnings.catch_warnings(record=True):
-                    warnings.simplefilter("always")
-                    try:
-                        return hasattr(self._obj, name), False, None
-                    except Exception:
-                        # Obviously has an attribute (probably a property) that
-                        # gets executed, so just avoid all exceptions here.
-                        pass
-            return False, False, None
+            return False, False
         else:
             if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
-                if isinstance(attr, property):
-                    if hasattr(attr.fget, '__annotations__'):
-                        a = DirectObjectAccess(self._inference_state, attr.fget)
-                        return True, True, a.get_return_annotation()
                 # In case of descriptors that have get methods we cannot return
                 # its value, because that would mean code execution.
-                return True, True, None
-        return True, False, None
+                return True, True
+        return True, False

     def getattr_paths(self, name, default=_sentinel):
         try:
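The safety check above rests on `getattr_static`, which resolves an attribute without triggering descriptors or `__getattr__`. jedi vendors its own copy, but the standard `inspect.getattr_static` behaves the same way:

    import inspect

    class Dangerous:
        @property
        def boom(self):
            raise RuntimeError('side effect!')

    obj = Dangerous()
    attr = inspect.getattr_static(obj, 'boom')  # returns the property object itself
    print(type(attr))                           # <class 'property'>
    # getattr(obj, 'boom') would execute the getter and raise RuntimeError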
@@ -388,7 +361,7 @@ class DirectObjectAccess:
         except AttributeError:
             pass
         else:
-            if module is not None and isinstance(module, str):
+            if module is not None:
                 try:
                     __import__(module)
                     # For some modules like _sqlite3, the __module__ for classes is

@@ -519,7 +492,7 @@ class DirectObjectAccess:
             # the signature. In that case we just want a simple escape for now.
             raise ValueError

-    def get_return_annotation(self) -> Optional[AccessPath]:
+    def get_return_annotation(self):
         try:
             o = self._obj.__annotations__.get('return')
         except AttributeError:
@@ -34,7 +34,7 @@ class MixedObject(ValueWrapper):

     This combined logic makes it possible to provide more powerful REPL
     completion. It allows side effects that are not noticeable with the default
-    parser structure to still be completable.
+    parser structure to still be completeable.

     The biggest difference from CompiledValue to MixedObject is that we are
     generally dealing with Python code and not with C code. This will generate

@@ -142,9 +142,9 @@ class MixedObjectFilter(compiled.CompiledValueFilter):
         super().__init__(inference_state, compiled_value)
         self._tree_value = tree_value

-    def _create_name(self, *args, **kwargs):
+    def _create_name(self, name):
         return MixedName(
-            super()._create_name(*args, **kwargs),
+            super()._create_name(name),
             self._tree_value,
         )

@@ -187,7 +187,7 @@ def _find_syntax_node_name(inference_state, python_object):
     try:
         python_object = _get_object_to_check(python_object)
         path = inspect.getsourcefile(python_object)
-    except (OSError, TypeError):
+    except TypeError:
         # The type might not be known (e.g. class_with_dict.__weakref__)
         return None
     path = None if path is None else Path(path)

@@ -267,7 +267,7 @@ def _find_syntax_node_name(inference_state, python_object):
 @inference_state_function_cache()
 def _create(inference_state, compiled_value, module_context):
     # TODO accessing this is bad, but it probably doesn't matter that much,
-    # because we're working with interpreters only here.
+    # because we're working with interpreteters only here.
     python_object = compiled_value.access_handle.access._obj
     result = _find_syntax_node_name(inference_state, python_object)
     if result is None:
@@ -5,26 +5,8 @@ goals:
 1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can
    be ignored and dealt with.
 2. Make it possible to handle different Python versions as well as virtualenvs.
-
-The architecture here is briefly:
-- For each Jedi `Environment` there is a corresponding subprocess which
-  operates within the target environment. If the subprocess dies it is replaced
-  at this level.
-- `CompiledSubprocess` manages exactly one subprocess and handles communication
-  from the parent side.
-- `Listener` runs within the subprocess, processing each request and yielding
-  results.
-- `InterpreterEnvironment` provides an API which matches that of `Environment`,
-  but runs functionality inline rather than within a subprocess. It is thus
-  used both directly in places where a subprocess is unnecessary and/or
-  undesirable and also within subprocesses themselves.
-- `InferenceStateSubprocess` (or `InferenceStateSameProcess`) provide high
-  level access to functionality within the subprocess from within the parent.
-  Each `InterpreterState` has an instance of one of these, provided by its
-  environment.
 """

-import collections
 import os
 import sys
 import queue

@@ -33,7 +15,6 @@ import traceback
 import weakref
 from functools import partial
 from threading import Thread
-from typing import Dict, TYPE_CHECKING

 from jedi._compatibility import pickle_dump, pickle_load
 from jedi import debug

@@ -43,9 +24,6 @@ from jedi.inference.compiled.access import DirectObjectAccess, AccessPath, \
     SignatureParam
 from jedi.api.exceptions import InternalError

-if TYPE_CHECKING:
-    from jedi.inference import InferenceState
-
 _MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
 PICKLE_PROTOCOL = 4

@@ -104,9 +82,10 @@ def _cleanup_process(process, thread):


 class _InferenceStateProcess:
-    def __init__(self, inference_state: 'InferenceState') -> None:
+    def __init__(self, inference_state):
         self._inference_state_weakref = weakref.ref(inference_state)
-        self._handles: Dict[int, AccessHandle] = {}
+        self._inference_state_id = id(inference_state)
+        self._handles = {}

     def get_or_create_access_handle(self, obj):
         id_ = id(obj)

@@ -136,49 +115,11 @@ class InferenceStateSameProcess(_InferenceStateProcess):


 class InferenceStateSubprocess(_InferenceStateProcess):
-    """
-    API to functionality which will run in a subprocess.
-
-    This mediates the interaction between an `InferenceState` and the actual
-    execution of functionality running within a `CompiledSubprocess`. Available
-    functions are defined in `.functions`, though should be accessed via
-    attributes on this class of the same name.
-
-    This class is responsible for indicating that the `InferenceState` within
-    the subprocess can be removed once the corresponding instance in the parent
-    goes away.
-    """
-
-    def __init__(
-        self,
-        inference_state: 'InferenceState',
-        compiled_subprocess: 'CompiledSubprocess',
-    ) -> None:
+    def __init__(self, inference_state, compiled_subprocess):
         super().__init__(inference_state)
         self._used = False
         self._compiled_subprocess = compiled_subprocess

-        # Opaque id we'll pass to the subprocess to identify the context (an
-        # `InferenceState`) which should be used for the request. This allows us
-        # to make subsequent requests which operate on results from previous
-        # ones, while keeping a single subprocess which can work with several
-        # contexts in the parent process. Once it is no longer needed (i.e. when
-        # this class goes away), we also use this id to indicate that the
-        # subprocess can discard the context.
-        #
-        # Note: this id is deliberately coupled to this class (and not to
-        # `InferenceState`) as this class manages access handle mappings which
-        # must correspond to those in the subprocess. This approach also avoids
-        # race conditions from successive `InferenceState`s with the same object
-        # id (as observed while adding support for Python 3.13).
-        #
-        # This value does not need to be the `id()` of this instance, we merely
-        # need to ensure that it enables the (visible) lifetime of the context
-        # within the subprocess to match that of this class. We therefore also
-        # depend on the semantics of `CompiledSubprocess.delete_inference_state`
-        # for correctness.
-        self._inference_state_id = id(self)
-
     def __getattr__(self, name):
         func = _get_function(name)

@@ -186,7 +127,7 @@ class InferenceStateSubprocess(_InferenceStateProcess):
         self._used = True

         result = self._compiled_subprocess.run(
-            self._inference_state_id,
+            self._inference_state_weakref(),
             func,
             args=args,
             kwargs=kwargs,

@@ -222,23 +163,12 @@ class InferenceStateSubprocess(_InferenceStateProcess):


 class CompiledSubprocess:
-    """
-    A subprocess which runs inference within a target environment.
-
-    This class manages the interface to a single instance of such a process as
-    well as the lifecycle of the process itself. See `.__main__` and `Listener`
-    for the implementation of the subprocess and details of the protocol.
-
-    A single live instance of this is maintained by `jedi.api.environment.Environment`,
-    so that typically a single subprocess is used at a time.
-    """
-
     is_crashed = False

     def __init__(self, executable, env_vars=None):
         self._executable = executable
         self._env_vars = env_vars
-        self._inference_state_deletion_queue = collections.deque()
+        self._inference_state_deletion_queue = queue.deque()
         self._cleanup_callable = lambda: None

     def __repr__(self):
@@ -282,18 +212,18 @@ class CompiledSubprocess:
                 t)
         return process

-    def run(self, inference_state_id, function, args=(), kwargs={}):
+    def run(self, inference_state, function, args=(), kwargs={}):
         # Delete old inference_states.
         while True:
             try:
-                delete_id = self._inference_state_deletion_queue.pop()
+                inference_state_id = self._inference_state_deletion_queue.pop()
             except IndexError:
                 break
             else:
-                self._send(delete_id, None)
+                self._send(inference_state_id, None)

         assert callable(function)
-        return self._send(inference_state_id, function, args, kwargs)
+        return self._send(id(inference_state), function, args, kwargs)

     def get_sys_path(self):
         return self._send(None, functions.get_sys_path, (), {})
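`run()` drains the deletion queue before forwarding a request, so stale inference states are cleaned up lazily on the next use of the subprocess instead of requiring a dedicated thread. (The older spelling `queue.deque` only works because the `queue` module happens to re-export `collections.deque`.) A reduced sketch of the drain loop, with `send` as a stand-in for `_send`:

    import collections

    pending_deletions = collections.deque()

    def send(state_id, function):
        print('would pickle to child:', state_id, function)  # stand-in

    def run(state_id, function):
        # Drain deferred deletions first.
        while True:
            try:
                delete_id = pending_deletions.pop()
            except IndexError:
                break
            else:
                send(delete_id, None)  # function=None: drop that state
        return send(state_id, function)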
@@ -341,65 +271,21 @@ class CompiledSubprocess:

     def delete_inference_state(self, inference_state_id):
         """
-        Indicate that an inference state (in the subprocess) is no longer
-        needed.
-
-        The state corresponding to the given id will become inaccessible and the
-        id may safely be re-used to refer to a different context.
-
-        Note: it is not guaranteed that the corresponding state will actually be
-        deleted immediately.
+        Currently we are not deleting inference_state instantly. They only get
+        deleted once the subprocess is used again. It would probably be a better
+        solution to move all of this into a thread. However, the memory usage
+        of a single inference_state shouldn't be that high.
         """
-        # Warning: if changing the semantics of context deletion see the comment
-        # in `InferenceStateSubprocess.__init__` regarding potential race
-        # conditions.
-
-        # Currently we are not deleting the related state instantly. They only
-        # get deleted once the subprocess is used again. It would probably be a
-        # better solution to move all of this into a thread. However, the memory
-        # usage of a single inference_state shouldn't be that high.
+        # With an argument - the inference_state gets deleted.
         self._inference_state_deletion_queue.append(inference_state_id)


 class Listener:
-    """
-    Main loop for the subprocess which actually does the inference.
-
-    This class runs within the target environment. It listens to instructions
-    from the parent process, runs inference and returns the results.
-
-    The subprocess has a long lifetime and is expected to process several
-    requests, including for different `InferenceState` instances in the parent.
-    See `CompiledSubprocess` for the parent half of the system.
-
-    Communication is via pickled data sent serially over stdin and stdout.
-    Stderr is read only if the child process crashes.
-
-    The request protocol is a 4-tuple of:
-    * inference_state_id | None: an opaque identifier of the parent's
-      `InferenceState`. An `InferenceState` operating over an
-      `InterpreterEnvironment` is created within this process for each of
-      these, ensuring that each parent context has a corresponding context
-      here. This allows context to be persisted between requests. Unless
-      `None`, the local `InferenceState` will be passed to the given function
-      as the first positional argument.
-    * function | None: the function to run. This is expected to be a member of
-      `.functions`. `None` indicates that the corresponding inference state is
-      no longer needed and should be dropped.
-    * args: positional arguments to the `function`. If any of these are
-      `AccessHandle` instances they will be adapted to the local
-      `InferenceState` before being passed.
-    * kwargs: keyword arguments to the `function`. If any of these are
-      `AccessHandle` instances they will be adapted to the local
-      `InferenceState` before being passed.
-
-    The result protocol is a 3-tuple of either:
-    * (False, None, function result): if the function returns without error, or
-    * (True, traceback, exception): if the function raises an exception
-    """
-
     def __init__(self):
         self._inference_states = {}
+        # TODO refactor so we don't need to process anymore just handle
+        # controlling.
+        self._process = _InferenceStateProcess(Listener)

     def _get_inference_state(self, function, inference_state_id):
         from jedi.inference import InferenceState
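The `Listener` docstring removed on the right-hand side documents the wire protocol: pickled 4-tuple requests and 3-tuple results over stdin/stdout. A toy version of both ends, heavily simplified (no AccessHandle adaptation, no lazy state creation):

    import pickle
    import traceback

    def client_call(to_child, from_child, state_id, function, *args, **kwargs):
        pickle.dump((state_id, function, args, kwargs), to_child, protocol=4)
        to_child.flush()
        is_exception, tb, result = pickle.load(from_child)
        if is_exception:
            raise result
        return result

    def serve_one(from_parent, to_parent, states):
        state_id, function, args, kwargs = pickle.load(from_parent)
        try:
            if function is None:
                del states[state_id]  # the 'drop this state' request
                payload = (False, None, None)
            else:
                payload = (False, None, function(states[state_id], *args, **kwargs))
        except Exception as e:
            payload = (True, traceback.format_exc(), e)
        pickle.dump(payload, to_parent, protocol=4)
        to_parent.flush()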
@@ -421,9 +307,6 @@ class Listener:
         if inference_state_id is None:
             return function(*args, **kwargs)
         elif function is None:
-            # Warning: if changing the semantics of context deletion see the comment
-            # in `InferenceStateSubprocess.__init__` regarding potential race
-            # conditions.
             del self._inference_states[inference_state_id]
         else:
             inference_state = self._get_inference_state(function, inference_state_id)

@@ -464,12 +347,7 @@ class Listener:


 class AccessHandle:
-    def __init__(
-        self,
-        subprocess: _InferenceStateProcess,
-        access: DirectObjectAccess,
-        id_: int,
-    ) -> None:
+    def __init__(self, subprocess, access, id_):
         self.access = access
         self._subprocess = subprocess
         self.id = id_
@@ -3,6 +3,10 @@ import sys
 from importlib.abc import MetaPathFinder
 from importlib.machinery import PathFinder

+# Remove the first entry, because it's simply a directory entry that equals
+# this directory.
+del sys.path[0]
+

 def _get_paths():
     # Get the path to jedi.

@@ -17,11 +21,11 @@ class _ExactImporter(MetaPathFinder):
     def __init__(self, path_dct):
         self._path_dct = path_dct

-    def find_spec(self, fullname, path=None, target=None):
+    def find_module(self, fullname, path=None):
         if path is None and fullname in self._path_dct:
             p = self._path_dct[fullname]
-            spec = PathFinder.find_spec(fullname, path=[p], target=target)
-            return spec
+            loader = PathFinder.find_module(fullname, path=[p])
+            return loader
         return None
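The `_ExactImporter` hunk tracks the import system: `find_module` and `PathFinder.find_module` were deprecated and eventually removed (Python 3.12), so master uses the spec-based API. The same finder, self-contained:

    import sys
    from importlib.abc import MetaPathFinder
    from importlib.machinery import PathFinder

    class ExactImporter(MetaPathFinder):
        """Resolve a fixed set of module names from explicit directories."""
        def __init__(self, path_dct):
            self._path_dct = path_dct

        def find_spec(self, fullname, path=None, target=None):
            if path is None and fullname in self._path_dct:
                p = self._path_dct[fullname]
                return PathFinder.find_spec(fullname, path=[p], target=target)
            return None

    # Hypothetical usage:
    # sys.meta_path.insert(0, ExactImporter({'jedi': '/path/to/checkout'}))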
@@ -2,11 +2,12 @@ import sys
 import os
 import inspect
 import importlib
+import warnings
 from pathlib import Path
-from zipfile import ZipFile
-from zipimport import zipimporter, ZipImportError
+from zipimport import zipimporter
 from importlib.machinery import all_suffixes

+from jedi._compatibility import cast_path
 from jedi.inference.compiled import access
 from jedi import debug
 from jedi import parser_utils

@@ -14,7 +15,7 @@ from jedi.file_io import KnownContentFileIO, ZipFileIO


 def get_sys_path():
-    return sys.path
+    return list(map(cast_path, sys.path))


 def load_module(inference_state, **kwargs):

@@ -92,22 +93,15 @@ def _iter_module_names(inference_state, paths):
     # Python modules/packages
     for path in paths:
         try:
-            dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path))
+            dirs = os.scandir(path)
         except OSError:
-            try:
-                zip_import_info = zipimporter(path)
-                # Unfortunately, there is no public way to access zipimporter's
-                # private _files member. We therefore have to use a
-                # custom function to iterate over the files.
-                dir_entries = _zip_list_subdirectory(
-                    zip_import_info.archive, zip_import_info.prefix)
-            except ZipImportError:
-                # The file might not exist or reading it might lead to an error.
-                debug.warning("Not possible to list directory: %s", path)
-                continue
-        for name, is_dir in dir_entries:
+            # The file might not exist or reading it might lead to an error.
+            debug.warning("Not possible to list directory: %s", path)
+            continue
+        for dir_entry in dirs:
+            name = dir_entry.name
             # First Namespaces then modules/stubs
-            if is_dir:
+            if dir_entry.is_dir():
                 # pycache is obviously not an interesting namespace. Also the
                 # name must be a valid identifier.
                 if name != '__pycache__' and name.isidentifier():
@@ -150,11 +144,7 @@ def _find_module(string, path=None, full_name=None, is_global_search=True):

         spec = find_spec(string, p)
         if spec is not None:
-            if spec.origin == "frozen":
-                continue
-
             loader = spec.loader
-
             if loader is None and not spec.has_location:
                 # This is a namespace package.
                 full_name = string if not path else full_name

@@ -166,16 +156,17 @@ def _find_module(string, path=None, full_name=None, is_global_search=True):


 def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
-    if not loader:
-        spec = importlib.machinery.PathFinder.find_spec(string, path)
-        if spec is not None:
-            loader = spec.loader
+    loader = loader or importlib.machinery.PathFinder.find_module(string, path)

     if loader is None and path is None:  # Fallback to find builtins
         try:
-            spec = importlib.util.find_spec(string)
-            if spec is not None:
-                loader = spec.loader
+            with warnings.catch_warnings(record=True):
+                # Mute "DeprecationWarning: Use importlib.util.find_spec()
+                # instead." While we should replace that in the future, it's
+                # probably good to wait until we deprecate Python 3.3, since
+                # it was added in Python 3.4 and find_loader hasn't been
+                # removed in 3.6.
+                loader = importlib.find_loader(string)
         except ValueError as e:
             # See #491. Importlib might raise a ValueError, to avoid this, we
             # just raise an ImportError to fix the issue.
@@ -199,7 +190,7 @@ def _from_loader(loader, string):
     except AttributeError:
         return None, is_package
     else:
-        module_path = get_filename(string)
+        module_path = cast_path(get_filename(string))

     # To avoid unicode and read bytes, "overwrite" loader.get_source if
     # possible.

@@ -221,7 +212,7 @@ def _from_loader(loader, string):
     if code is None:
         return None, is_package
     if isinstance(loader, zipimporter):
-        return ZipFileIO(module_path, code, Path(loader.archive)), is_package
+        return ZipFileIO(module_path, code, Path(cast_path(loader.archive))), is_package

     return KnownContentFileIO(module_path, code), is_package

@@ -239,17 +230,6 @@ def _get_source(loader, fullname):
                            name=fullname)


-def _zip_list_subdirectory(zip_path, zip_subdir_path):
-    zip_file = ZipFile(zip_path)
-    zip_subdir_path = Path(zip_subdir_path)
-    zip_content_file_paths = zip_file.namelist()
-    for raw_file_name in zip_content_file_paths:
-        file_path = Path(raw_file_name)
-        if file_path.parent == zip_subdir_path:
-            file_path = file_path.relative_to(zip_subdir_path)
-            yield file_path.name, raw_file_name.endswith("/")
-
-
 class ImplicitNSInfo:
     """Stores information returned from an implicit namespace spec"""
     def __init__(self, name, paths):
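`_zip_list_subdirectory` (left side) works around `zipimporter` exposing no public directory listing: it filters `ZipFile.namelist()` down to direct children of one archive subdirectory. Usage against a hypothetical `plugins.zip` containing `pkg/a.py` and `pkg/sub/`:

    from pathlib import Path
    from zipfile import ZipFile

    def zip_list_subdirectory(zip_path, zip_subdir_path):
        zip_subdir_path = Path(zip_subdir_path)
        for raw_file_name in ZipFile(zip_path).namelist():
            file_path = Path(raw_file_name)
            if file_path.parent == zip_subdir_path:
                yield file_path.name, raw_file_name.endswith('/')

    # for name, is_dir in zip_list_subdirectory('plugins.zip', 'pkg'):
    #     print(name, is_dir)  # ('a.py', False), ('sub', True)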
@@ -5,10 +5,10 @@ import re
 from functools import partial
 from inspect import Parameter
 from pathlib import Path
-from typing import Optional

 from jedi import debug
 from jedi.inference.utils import to_list
+from jedi._compatibility import cast_path
 from jedi.cache import memoize_method
 from jedi.inference.filters import AbstractFilter
 from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \

@@ -51,6 +51,7 @@ class CompiledValue(Value):
     def py__call__(self, arguments):
         return_annotation = self.access_handle.get_return_annotation()
         if return_annotation is not None:
+            # TODO the return annotation may also be a string.
             return create_from_access_path(
                 self.inference_state,
                 return_annotation

@@ -162,14 +163,11 @@ class CompiledValue(Value):
     def py__simple_getitem__(self, index):
         with reraise_getitem_errors(IndexError, KeyError, TypeError):
             try:
-                access = self.access_handle.py__simple_getitem__(
-                    index,
-                    safe=not self.inference_state.allow_unsafe_executions
-                )
+                access = self.access_handle.py__simple_getitem__(index)
             except AttributeError:
                 return super().py__simple_getitem__(index)
         if access is None:
-            return super().py__simple_getitem__(index)
+            return NO_VALUES

         return ValueSet([create_from_access_path(self.inference_state, access)])

@@ -295,7 +293,10 @@ class CompiledModule(CompiledValue):
         return CompiledModuleContext(self)

     def py__path__(self):
-        return self.access_handle.py__path__()
+        paths = self.access_handle.py__path__()
+        if paths is None:
+            return None
+        return map(cast_path, paths)

     def is_package(self):
         return self.py__path__() is not None

@@ -308,17 +309,19 @@ class CompiledModule(CompiledValue):
             return ()
         return tuple(name.split('.'))

-    def py__file__(self) -> Optional[Path]:
-        return self.access_handle.py__file__()  # type: ignore[no-any-return]
+    def py__file__(self):
+        path = cast_path(self.access_handle.py__file__())
+        if path is None:
+            return None
+        return Path(path)


 class CompiledName(AbstractNameDefinition):
-    def __init__(self, inference_state, parent_value, name, is_descriptor):
+    def __init__(self, inference_state, parent_value, name):
         self._inference_state = inference_state
         self.parent_context = parent_value.as_context()
         self._parent_value = parent_value
         self.string_name = name
-        self.is_descriptor = is_descriptor

     def py__doc__(self):
         return self.infer_compiled_value().py__doc__()

@@ -345,11 +348,6 @@ class CompiledName(AbstractNameDefinition):

     @property
     def api_type(self):
-        if self.is_descriptor:
-            # In case of properties we want to avoid executions as much as
-            # possible. Since the api_type can be wrong for other reasons
-            # anyway, we just return instance here.
-            return "instance"
         return self.infer_compiled_value().api_type

     def infer(self):

@@ -440,10 +438,9 @@ class CompiledValueFilter(AbstractFilter):

     def get(self, name):
         access_handle = self.compiled_value.access_handle
-        safe = not self._inference_state.allow_unsafe_executions
         return self._get(
             name,
-            lambda name: access_handle.is_allowed_getattr(name, safe=safe),
+            lambda name, unsafe: access_handle.is_allowed_getattr(name, unsafe),
             lambda name: name in access_handle.dir(),
             check_has_attribute=True
         )
@@ -452,40 +449,36 @@ class CompiledValueFilter(AbstractFilter):
|
|||||||
"""
|
"""
|
||||||
To remove quite a few access calls we introduced the callback here.
|
To remove quite a few access calls we introduced the callback here.
|
||||||
"""
|
"""
|
||||||
has_attribute, is_descriptor, property_return_annotation = allowed_getattr_callback(
|
if self._inference_state.allow_descriptor_getattr:
|
||||||
name,
|
pass
|
||||||
)
|
|
||||||
if property_return_annotation is not None:
|
|
||||||
values = create_from_access_path(
|
|
||||||
self._inference_state,
|
|
||||||
property_return_annotation
|
|
||||||
).execute_annotation()
|
|
||||||
if values:
|
|
||||||
return [CompiledValueName(v, name) for v in values]
|
|
||||||
|
|
||||||
|
has_attribute, is_descriptor = allowed_getattr_callback(
|
||||||
|
name,
|
||||||
|
unsafe=self._inference_state.allow_descriptor_getattr
|
||||||
|
)
|
||||||
if check_has_attribute and not has_attribute:
|
if check_has_attribute and not has_attribute:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
if (is_descriptor or not has_attribute) \
|
if (is_descriptor or not has_attribute) \
|
||||||
and not self._inference_state.allow_unsafe_executions:
|
and not self._inference_state.allow_descriptor_getattr:
|
||||||
return [self._get_cached_name(name, is_empty=True)]
|
return [self._get_cached_name(name, is_empty=True)]
|
||||||
|
|
||||||
if self.is_instance and not in_dir_callback(name):
|
if self.is_instance and not in_dir_callback(name):
|
||||||
return []
|
return []
|
||||||
return [self._get_cached_name(name, is_descriptor=is_descriptor)]
|
return [self._get_cached_name(name)]
|
||||||
|
|
||||||
@memoize_method
|
@memoize_method
|
||||||
def _get_cached_name(self, name, is_empty=False, *, is_descriptor=False):
|
def _get_cached_name(self, name, is_empty=False):
|
||||||
if is_empty:
|
if is_empty:
|
||||||
return EmptyCompiledName(self._inference_state, name)
|
return EmptyCompiledName(self._inference_state, name)
|
||||||
else:
|
else:
|
||||||
return self._create_name(name, is_descriptor=is_descriptor)
|
return self._create_name(name)
|
||||||
|
|
||||||
def values(self):
|
def values(self):
|
||||||
from jedi.inference.compiled import builtin_from_name
|
from jedi.inference.compiled import builtin_from_name
|
||||||
names = []
|
names = []
|
||||||
needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
|
needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
|
||||||
# We could use `safe=False` here as well, especially as a parameter to
|
# We could use `unsafe` here as well, especially as a parameter to
|
||||||
# get_dir_infos. But this would lead to a lot of property executions
|
# get_dir_infos. But this would lead to a lot of property executions
|
||||||
# that are probably not wanted. The drawback for this is that we
|
# that are probably not wanted. The drawback for this is that we
|
||||||
# have a different name for `get` and `values`. For `get` we always
|
# have a different name for `get` and `values`. For `get` we always
|
||||||
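The new `property` branch on the master side of `is_allowed_getattr` lets completion infer a property's type from its getter's return annotation rather than executing the getter. Stripped of jedi's value machinery, the trick is simply:

    class User:
        @property
        def name(self) -> str:
            raise RuntimeError('expensive side effect')

    prop = User.__dict__['name']  # the property object; the getter is not called
    print(prop.fget.__annotations__.get('return'))  # <class 'str'>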
@@ -493,7 +486,7 @@ class CompiledValueFilter(AbstractFilter):
         for name in dir_infos:
             names += self._get(
                 name,
-                lambda name: dir_infos[name],
+                lambda name, unsafe: dir_infos[name],
                 lambda name: name in dir_infos,
             )

@@ -503,12 +496,11 @@ class CompiledValueFilter(AbstractFilter):
             names += filter.values()
         return names

-    def _create_name(self, name, is_descriptor):
+    def _create_name(self, name):
         return CompiledName(
             self._inference_state,
             self.compiled_value,
-            name,
-            is_descriptor,
+            name
         )

     def __repr__(self):
@@ -1,8 +1,7 @@
|
|||||||
from abc import abstractmethod
|
from abc import abstractmethod
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from pathlib import Path
|
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
|
from parso.tree import search_ancestor
|
||||||
from parso.python.tree import Name
|
from parso.python.tree import Name
|
||||||
|
|
||||||
from jedi.inference.filters import ParserTreeFilter, MergedFilter, \
|
from jedi.inference.filters import ParserTreeFilter, MergedFilter, \
|
||||||
@@ -289,7 +288,7 @@ class TreeContextMixin:
|
|||||||
def create_name(self, tree_name):
|
def create_name(self, tree_name):
|
||||||
definition = tree_name.get_definition()
|
definition = tree_name.get_definition()
|
||||||
if definition and definition.type == 'param' and definition.name == tree_name:
|
if definition and definition.type == 'param' and definition.name == tree_name:
|
||||||
funcdef = definition.search_ancestor('funcdef', 'lambdef')
|
funcdef = search_ancestor(definition, 'funcdef', 'lambdef')
|
||||||
func = self.create_value(funcdef)
|
func = self.create_value(funcdef)
|
||||||
return AnonymousParamName(func, tree_name)
|
return AnonymousParamName(func, tree_name)
|
||||||
else:
|
else:
|
||||||
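The row above shows a pattern that recurs throughout this diff: the newer code (left column) calls search_ancestor as a method on the parso node, while the older code (right column) used the module-level helper from parso.tree. A minimal sketch of the two equivalent call styles, assuming parso >= 0.8 is installed:

import parso
from parso.tree import search_ancestor

module = parso.parse("def f(x):\n    return x\n")
name = next(module.iter_funcdefs()).name   # the Name leaf for 'f'

# Older style (right column): module-level helper, deprecated in parso 0.8.
old_result = search_ancestor(name, 'funcdef', 'lambdef')

# Newer style (left column): a method on the node itself.
new_result = name.search_ancestor('funcdef', 'lambdef')
assert old_result is new_result            # the same 'funcdef' node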
@@ -308,8 +307,8 @@ class FunctionContext(TreeContextMixin, ValueContext):
|
|||||||
|
|
||||||
|
|
||||||
class ModuleContext(TreeContextMixin, ValueContext):
|
class ModuleContext(TreeContextMixin, ValueContext):
|
||||||
def py__file__(self) -> Optional[Path]:
|
def py__file__(self):
|
||||||
return self._value.py__file__() # type: ignore[no-any-return]
|
return self._value.py__file__()
|
||||||
|
|
||||||
def get_filters(self, until_position=None, origin_scope=None):
|
def get_filters(self, until_position=None, origin_scope=None):
|
||||||
filters = self._value.get_filters(origin_scope)
|
filters = self._value.get_filters(origin_scope)
|
||||||
@@ -326,7 +325,7 @@ class ModuleContext(TreeContextMixin, ValueContext):
|
|||||||
yield from filters
|
yield from filters
|
||||||
|
|
||||||
def get_global_filter(self):
|
def get_global_filter(self):
|
||||||
return GlobalNameFilter(self)
|
return GlobalNameFilter(self, self.tree_node)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def string_names(self):
|
def string_names(self):
|
||||||
@@ -356,8 +355,8 @@ class NamespaceContext(TreeContextMixin, ValueContext):
|
|||||||
def string_names(self):
|
def string_names(self):
|
||||||
return self._value.string_names
|
return self._value.string_names
|
||||||
|
|
||||||
def py__file__(self) -> Optional[Path]:
|
def py__file__(self):
|
||||||
return self._value.py__file__() # type: ignore[no-any-return]
|
return self._value.py__file__()
|
||||||
|
|
||||||
|
|
||||||
class ClassContext(TreeContextMixin, ValueContext):
|
class ClassContext(TreeContextMixin, ValueContext):
|
||||||
@@ -406,8 +405,8 @@ class CompiledModuleContext(CompiledContext):
|
|||||||
def string_names(self):
|
def string_names(self):
|
||||||
return self._value.string_names
|
return self._value.string_names
|
||||||
|
|
||||||
def py__file__(self) -> Optional[Path]:
|
def py__file__(self):
|
||||||
return self._value.py__file__() # type: ignore[no-any-return]
|
return self._value.py__file__()
|
||||||
|
|
||||||
|
|
||||||
def _get_global_filters_for_name(context, name_or_none, position):
|
def _get_global_filters_for_name(context, name_or_none, position):
|
||||||
@@ -415,13 +414,13 @@ def _get_global_filters_for_name(context, name_or_none, position):
|
|||||||
# function and get inferred in the value before the function. So
|
# function and get inferred in the value before the function. So
|
||||||
# make sure to exclude the function/class name.
|
# make sure to exclude the function/class name.
|
||||||
if name_or_none is not None:
|
if name_or_none is not None:
|
||||||
ancestor = name_or_none.search_ancestor('funcdef', 'classdef', 'lambdef')
|
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef')
|
||||||
lambdef = None
|
lambdef = None
|
||||||
if ancestor == 'lambdef':
|
if ancestor == 'lambdef':
|
||||||
# For lambdas it's even more complicated since parts will
|
# For lambdas it's even more complicated since parts will
|
||||||
# be inferred later.
|
# be inferred later.
|
||||||
lambdef = ancestor
|
lambdef = ancestor
|
||||||
ancestor = name_or_none.search_ancestor('funcdef', 'classdef')
|
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef')
|
||||||
if ancestor is not None:
|
if ancestor is not None:
|
||||||
colon = ancestor.children[-2]
|
colon = ancestor.children[-2]
|
||||||
if position is not None and position < colon.start_pos:
|
if position is not None and position < colon.start_pos:
|
||||||
|
|||||||
@@ -1,21 +0,0 @@
|
|||||||
from jedi.inference.value import ModuleValue
|
|
||||||
from jedi.inference.context import ModuleContext
|
|
||||||
|
|
||||||
|
|
||||||
class DocstringModule(ModuleValue):
|
|
||||||
def __init__(self, in_module_context, **kwargs):
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
self._in_module_context = in_module_context
|
|
||||||
|
|
||||||
def _as_context(self):
|
|
||||||
return DocstringModuleContext(self, self._in_module_context)
|
|
||||||
|
|
||||||
|
|
||||||
class DocstringModuleContext(ModuleContext):
|
|
||||||
def __init__(self, module_value, in_module_context):
|
|
||||||
super().__init__(module_value)
|
|
||||||
self._in_module_context = in_module_context
|
|
||||||
|
|
||||||
def get_filters(self, origin_scope=None, until_position=None):
|
|
||||||
yield from super().get_filters(until_position=until_position)
|
|
||||||
yield from self._in_module_context.get_filters()
|
|
||||||
@@ -17,10 +17,12 @@ annotations.
|
|||||||
|
|
||||||
import re
|
import re
|
||||||
import warnings
|
import warnings
|
||||||
|
from textwrap import dedent
|
||||||
|
|
||||||
from parso import parse, ParserSyntaxError
|
from parso import parse, ParserSyntaxError
|
||||||
|
|
||||||
from jedi import debug
|
from jedi import debug
|
||||||
|
from jedi.common import indent_block
|
||||||
from jedi.inference.cache import inference_state_method_cache
|
from jedi.inference.cache import inference_state_method_cache
|
||||||
from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
|
from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
|
||||||
NO_VALUES
|
NO_VALUES
|
||||||
@@ -48,7 +50,7 @@ def _get_numpy_doc_string_cls():
|
|||||||
global _numpy_doc_string_cache
|
global _numpy_doc_string_cache
|
||||||
if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
|
if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
|
||||||
raise _numpy_doc_string_cache
|
raise _numpy_doc_string_cache
|
||||||
from numpydoc.docscrape import NumpyDocString # type: ignore[import, unused-ignore]
|
from numpydoc.docscrape import NumpyDocString # type: ignore[import]
|
||||||
_numpy_doc_string_cache = NumpyDocString
|
_numpy_doc_string_cache = NumpyDocString
|
||||||
return _numpy_doc_string_cache
|
return _numpy_doc_string_cache
|
||||||
|
|
||||||
@@ -109,7 +111,7 @@ def _expand_typestr(type_str):
|
|||||||
yield type_str.split('of')[0]
|
yield type_str.split('of')[0]
|
||||||
# Check if the type is a set of valid literal values, e.g.: {'C', 'F', 'A'}
|
# Check if the type is a set of valid literal values, e.g.: {'C', 'F', 'A'}
|
||||||
elif type_str.startswith('{'):
|
elif type_str.startswith('{'):
|
||||||
node = parse(type_str, version='3.13').children[0]
|
node = parse(type_str, version='3.7').children[0]
|
||||||
if node.type == 'atom':
|
if node.type == 'atom':
|
||||||
for leaf in getattr(node.children[1], "children", []):
|
for leaf in getattr(node.children[1], "children", []):
|
||||||
if leaf.type == 'number':
|
if leaf.type == 'number':
|
||||||
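_expand_typestr above turns a numpydoc set literal such as {'C', 'F', 'A'} into candidate types by parsing it with parso. A rough, self-contained illustration of that parse; the exact node shapes are assumptions inferred from the surrounding code:

import parso

type_str = "{'C', 'F', 'A'}"
module = parso.parse(type_str)
brace = module.get_first_leaf()           # the opening '{'
atom = brace.parent                       # the enclosing 'atom' node
elements = atom.children[1]               # the element list between the braces
print([leaf.value for leaf in elements.children if leaf.type == 'string'])
# ["'C'", "'F'", "'A'"]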
@@ -180,40 +182,52 @@ def _strip_rst_role(type_str):
|
|||||||
|
|
||||||
|
|
||||||
def _infer_for_statement_string(module_context, string):
|
def _infer_for_statement_string(module_context, string):
|
||||||
|
code = dedent("""
|
||||||
|
def pseudo_docstring_stuff():
|
||||||
|
'''
|
||||||
|
Create a pseudo function for docstring statements.
|
||||||
|
Need this docstring so that if the below part is not valid Python this
|
||||||
|
is still a function.
|
||||||
|
'''
|
||||||
|
{}
|
||||||
|
""")
|
||||||
if string is None:
|
if string is None:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
|
for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
|
||||||
# Try to import module part in dotted name.
|
# Try to import module part in dotted name.
|
||||||
# (e.g., 'threading' in 'threading.Thread').
|
# (e.g., 'threading' in 'threading.Thread').
|
||||||
imports = "\n".join(f"import {p}" for p in potential_imports)
|
string = 'import %s\n' % element + string
|
||||||
string = f'{imports}\n{string}'
|
|
||||||
|
|
||||||
debug.dbg('Parse docstring code %s', string, color='BLUE')
|
debug.dbg('Parse docstring code %s', string, color='BLUE')
|
||||||
grammar = module_context.inference_state.grammar
|
grammar = module_context.inference_state.grammar
|
||||||
try:
|
try:
|
||||||
module = grammar.parse(string, error_recovery=False)
|
module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
|
||||||
except ParserSyntaxError:
|
except ParserSyntaxError:
|
||||||
return []
|
return []
|
||||||
try:
|
try:
|
||||||
# It's not the last item, because that's an end marker.
|
funcdef = next(module.iter_funcdefs())
|
||||||
stmt = module.children[-2]
|
# First pick suite, then simple_stmt and then the node,
|
||||||
|
# which is also not the last item, because there's a newline.
|
||||||
|
stmt = funcdef.children[-1].children[-1].children[-2]
|
||||||
except (AttributeError, IndexError):
|
except (AttributeError, IndexError):
|
||||||
return []
|
return []
|
||||||
|
|
||||||
if stmt.type not in ('name', 'atom', 'atom_expr'):
|
if stmt.type not in ('name', 'atom', 'atom_expr'):
|
||||||
return []
|
return []
|
||||||
|
|
||||||
# Here we basically use a fake module that also uses the filters in
|
from jedi.inference.value import FunctionValue
|
||||||
# the actual module.
|
function_value = FunctionValue(
|
||||||
from jedi.inference.docstring_utils import DocstringModule
|
module_context.inference_state,
|
||||||
m = DocstringModule(
|
module_context,
|
||||||
in_module_context=module_context,
|
funcdef
|
||||||
inference_state=module_context.inference_state,
|
|
||||||
module_node=module,
|
|
||||||
code_lines=[],
|
|
||||||
)
|
)
|
||||||
return list(_execute_types_in_stmt(m.as_context(), stmt))
|
func_execution_context = function_value.as_context()
|
||||||
|
# Use the module of the param.
|
||||||
|
# TODO this module is not the module of the param in case of a function
|
||||||
|
# call. In that case it's the module of the function call.
|
||||||
|
# stuffed with content from a function call.
|
||||||
|
return list(_execute_types_in_stmt(func_execution_context, stmt))
|
||||||
|
|
||||||
|
|
||||||
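In the rewritten _infer_for_statement_string above, dotted names in a docstring type (e.g. 'threading.Thread') get their module prefixes imported before the string is parsed, instead of being wrapped in a pseudo function as before. The prefix extraction is plain re; a small runnable check of the regex used:

import re

string = "threading.Thread"
potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
print(potential_imports)                  # ['threading']

imports = "\n".join(f"import {p}" for p in potential_imports)
print(f"{imports}\n{string}")
# import threading
# threading.Thread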
def _execute_types_in_stmt(module_context, stmt):
|
def _execute_types_in_stmt(module_context, stmt):
|
||||||
|
|||||||
@@ -66,11 +66,11 @@ def dynamic_param_lookup(function_value, param_index):
|
|||||||
have to look for all calls to ``func`` to find out what ``foo`` possibly
|
have to look for all calls to ``func`` to find out what ``foo`` possibly
|
||||||
is.
|
is.
|
||||||
"""
|
"""
|
||||||
if not function_value.inference_state.do_dynamic_params_search:
|
|
||||||
return NO_VALUES
|
|
||||||
|
|
||||||
funcdef = function_value.tree_node
|
funcdef = function_value.tree_node
|
||||||
|
|
||||||
|
if not settings.dynamic_params:
|
||||||
|
return NO_VALUES
|
||||||
|
|
||||||
path = function_value.get_root_context().py__file__()
|
path = function_value.get_root_context().py__file__()
|
||||||
if path is not None and is_stdlib_path(path):
|
if path is not None and is_stdlib_path(path):
|
||||||
# We don't want to search for references in the stdlib. Usually people
|
# We don't want to search for references in the stdlib. Usually people
|
||||||
|
|||||||
@@ -6,12 +6,13 @@ from abc import abstractmethod
|
|||||||
from typing import List, MutableMapping, Type
|
from typing import List, MutableMapping, Type
|
||||||
import weakref
|
import weakref
|
||||||
|
|
||||||
|
from parso.tree import search_ancestor
|
||||||
from parso.python.tree import Name, UsedNamesMapping
|
from parso.python.tree import Name, UsedNamesMapping
|
||||||
|
|
||||||
from jedi.inference import flow_analysis
|
from jedi.inference import flow_analysis
|
||||||
from jedi.inference.base_value import ValueSet, ValueWrapper, \
|
from jedi.inference.base_value import ValueSet, ValueWrapper, \
|
||||||
LazyValueWrapper
|
LazyValueWrapper
|
||||||
from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node
|
from jedi.parser_utils import get_cached_parent_scope
|
||||||
from jedi.inference.utils import to_list
|
from jedi.inference.utils import to_list
|
||||||
from jedi.inference.names import TreeNameDefinition, ParamName, \
|
from jedi.inference.names import TreeNameDefinition, ParamName, \
|
||||||
AnonymousParamName, AbstractNameDefinition, NameWrapper
|
AnonymousParamName, AbstractNameDefinition, NameWrapper
|
||||||
@@ -53,15 +54,11 @@ class FilterWrapper:
|
|||||||
return self.wrap_names(self._wrapped_filter.values())
|
return self.wrap_names(self._wrapped_filter.values())
|
||||||
|
|
||||||
|
|
||||||
def _get_definition_names(parso_cache_node, used_names, name_key):
|
def _get_definition_names(used_names, name_key):
|
||||||
if parso_cache_node is None:
|
|
||||||
names = used_names.get(name_key, ())
|
|
||||||
return tuple(name for name in names if name.is_definition(include_setitem=True))
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
for_module = _definition_name_cache[parso_cache_node]
|
for_module = _definition_name_cache[used_names]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
for_module = _definition_name_cache[parso_cache_node] = {}
|
for_module = _definition_name_cache[used_names] = {}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return for_module[name_key]
|
return for_module[name_key]
|
||||||
@@ -73,40 +70,18 @@ def _get_definition_names(parso_cache_node, used_names, name_key):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
class _AbstractUsedNamesFilter(AbstractFilter):
|
class AbstractUsedNamesFilter(AbstractFilter):
|
||||||
name_class = TreeNameDefinition
|
name_class = TreeNameDefinition
|
||||||
|
|
||||||
def __init__(self, parent_context, node_context=None):
|
def __init__(self, parent_context, parser_scope):
|
||||||
if node_context is None:
|
self._parser_scope = parser_scope
|
||||||
node_context = parent_context
|
self._module_node = self._parser_scope.get_root_node()
|
||||||
self._node_context = node_context
|
self._used_names = self._module_node.get_used_names()
|
||||||
self._parser_scope = node_context.tree_node
|
|
||||||
module_context = node_context.get_root_context()
|
|
||||||
# It is quite hacky that we have to use that. This is for caching
|
|
||||||
# certain things with a WeakKeyDictionary. However, parso intentionally
|
|
||||||
# uses slots (to save memory) and therefore we end up with having to
|
|
||||||
# have a weak reference to the object that caches the tree.
|
|
||||||
#
|
|
||||||
# Previously we have tried to solve this by using a weak reference onto
|
|
||||||
# used_names. However that also does not work, because it has a
|
|
||||||
# reference from the module, which itself is referenced by any node
|
|
||||||
# through parents.
|
|
||||||
path = module_context.py__file__()
|
|
||||||
if path is None:
|
|
||||||
# If the path is None, there is no guarantee that parso caches it.
|
|
||||||
self._parso_cache_node = None
|
|
||||||
else:
|
|
||||||
self._parso_cache_node = get_parso_cache_node(
|
|
||||||
module_context.inference_state.latest_grammar
|
|
||||||
if module_context.is_stub() else module_context.inference_state.grammar,
|
|
||||||
path
|
|
||||||
)
|
|
||||||
self._used_names = module_context.tree_node.get_used_names()
|
|
||||||
self.parent_context = parent_context
|
self.parent_context = parent_context
|
||||||
|
|
||||||
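The long comment in the __init__ above describes a real Python constraint: parso nodes declare __slots__ without a '__weakref__' slot, so they cannot be used as keys of a WeakKeyDictionary, which is why the cache is keyed on the parso cache node instead. A generic sketch of that constraint (standard-library Python, not jedi code):

import weakref

class WithSlots:
    __slots__ = ('value',)                 # no '__weakref__' slot

class WeakReferenceable:
    __slots__ = ('value', '__weakref__')

cache = weakref.WeakKeyDictionary()
cache[WeakReferenceable()] = 'cached'      # fine; the entry dies with the key

try:
    cache[WithSlots()] = 'cached'
except TypeError as e:
    print(e)   # cannot create weak reference to 'WithSlots' object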
def get(self, name):
|
def get(self, name):
|
||||||
return self._convert_names(self._filter(
|
return self._convert_names(self._filter(
|
||||||
_get_definition_names(self._parso_cache_node, self._used_names, name),
|
_get_definition_names(self._used_names, name),
|
||||||
))
|
))
|
||||||
|
|
||||||
def _convert_names(self, names):
|
def _convert_names(self, names):
|
||||||
@@ -117,7 +92,7 @@ class _AbstractUsedNamesFilter(AbstractFilter):
|
|||||||
name
|
name
|
||||||
for name_key in self._used_names
|
for name_key in self._used_names
|
||||||
for name in self._filter(
|
for name in self._filter(
|
||||||
_get_definition_names(self._parso_cache_node, self._used_names, name_key),
|
_get_definition_names(self._used_names, name_key),
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -125,7 +100,7 @@ class _AbstractUsedNamesFilter(AbstractFilter):
|
|||||||
return '<%s: %s>' % (self.__class__.__name__, self.parent_context)
|
return '<%s: %s>' % (self.__class__.__name__, self.parent_context)
|
||||||
|
|
||||||
|
|
||||||
class ParserTreeFilter(_AbstractUsedNamesFilter):
|
class ParserTreeFilter(AbstractUsedNamesFilter):
|
||||||
def __init__(self, parent_context, node_context=None, until_position=None,
|
def __init__(self, parent_context, node_context=None, until_position=None,
|
||||||
origin_scope=None):
|
origin_scope=None):
|
||||||
"""
|
"""
|
||||||
@@ -134,7 +109,10 @@ class ParserTreeFilter(_AbstractUsedNamesFilter):
|
|||||||
value, but for some type inference it's important to have a local
|
value, but for some type inference it's important to have a local
|
||||||
value of the other classes.
|
value of the other classes.
|
||||||
"""
|
"""
|
||||||
super().__init__(parent_context, node_context)
|
if node_context is None:
|
||||||
|
node_context = parent_context
|
||||||
|
super().__init__(parent_context, node_context.tree_node)
|
||||||
|
self._node_context = node_context
|
||||||
self._origin_scope = origin_scope
|
self._origin_scope = origin_scope
|
||||||
self._until_position = until_position
|
self._until_position = until_position
|
||||||
|
|
||||||
@@ -148,7 +126,7 @@ class ParserTreeFilter(_AbstractUsedNamesFilter):
|
|||||||
if parent.type == 'trailer':
|
if parent.type == 'trailer':
|
||||||
return False
|
return False
|
||||||
base_node = parent if parent.type in ('classdef', 'funcdef') else name
|
base_node = parent if parent.type in ('classdef', 'funcdef') else name
|
||||||
return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope
|
return get_cached_parent_scope(self._used_names, base_node) == self._parser_scope
|
||||||
|
|
||||||
def _check_flows(self, names):
|
def _check_flows(self, names):
|
||||||
for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
|
for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
|
||||||
@@ -180,7 +158,7 @@ class _FunctionExecutionFilter(ParserTreeFilter):
|
|||||||
@to_list
|
@to_list
|
||||||
def _convert_names(self, names):
|
def _convert_names(self, names):
|
||||||
for name in names:
|
for name in names:
|
||||||
param = name.search_ancestor('param')
|
param = search_ancestor(name, 'param')
|
||||||
# Here we don't need to check if the param is a default/annotation,
|
# Here we don't need to check if the param is a default/annotation,
|
||||||
# because those are not definitions and never make it to this
|
# because those are not definitions and never make it to this
|
||||||
# point.
|
# point.
|
||||||
@@ -204,7 +182,7 @@ class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter):
|
|||||||
return AnonymousParamName(self._function_value, name)
|
return AnonymousParamName(self._function_value, name)
|
||||||
|
|
||||||
|
|
||||||
class GlobalNameFilter(_AbstractUsedNamesFilter):
|
class GlobalNameFilter(AbstractUsedNamesFilter):
|
||||||
def get(self, name):
|
def get(self, name):
|
||||||
try:
|
try:
|
||||||
names = self._used_names[name]
|
names = self._used_names[name]
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
|
|||||||
check for -> a is a string). There's big potential in these checks.
|
check for -> a is a string). There's big potential in these checks.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from parso.tree import search_ancestor
|
||||||
from parso.python.tree import Name
|
from parso.python.tree import Name
|
||||||
|
|
||||||
from jedi import settings
|
from jedi import settings
|
||||||
@@ -75,7 +76,7 @@ def check_flow_information(value, flow, search_name, pos):
|
|||||||
])
|
])
|
||||||
|
|
||||||
for name in names:
|
for name in names:
|
||||||
ass = name.search_ancestor('assert_stmt')
|
ass = search_ancestor(name, 'assert_stmt')
|
||||||
if ass is not None:
|
if ass is not None:
|
||||||
result = _check_isinstance_type(value, ass.assertion, search_name)
|
result = _check_isinstance_type(value, ass.assertion, search_name)
|
||||||
if result is not None:
|
if result is not None:
|
||||||
|
|||||||
@@ -196,43 +196,13 @@ def py__annotations__(funcdef):
|
|||||||
return dct
|
return dct
|
||||||
|
|
||||||
|
|
||||||
def resolve_forward_references(context, all_annotations):
|
|
||||||
def resolve(node):
|
|
||||||
if node is None or node.type != 'string':
|
|
||||||
return node
|
|
||||||
|
|
||||||
node = _get_forward_reference_node(
|
|
||||||
context,
|
|
||||||
context.inference_state.compiled_subprocess.safe_literal_eval(
|
|
||||||
node.value,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
if node is None:
|
|
||||||
# There was a string, but it's not a valid annotation
|
|
||||||
return None
|
|
||||||
|
|
||||||
# The forward reference tree has an additional root node ('eval_input')
|
|
||||||
# that we don't want. Extract the node we do want, that is equivalent to
|
|
||||||
# the nodes returned by `py__annotations__` for a non-quoted node.
|
|
||||||
node = node.children[0]
|
|
||||||
|
|
||||||
return node
|
|
||||||
|
|
||||||
return {name: resolve(node) for name, node in all_annotations.items()}
|
|
||||||
|
|
||||||
|
|
||||||
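resolve_forward_references above unwraps string annotations ("forward references") by literal-evaluating the string and re-parsing it. A simplified stand-in, using ast.literal_eval where jedi delegates to its compiled subprocess, and a plain parso parse instead of the 'eval_input' grammar the comment mentions:

import ast
import parso

def resolve_forward_reference(annotation_source):
    value = ast.literal_eval(annotation_source)   # "'List[int]'" -> "List[int]"
    try:
        return parso.parse(value, error_recovery=False).children[0]
    except parso.ParserSyntaxError:
        return None                               # not a valid annotation

print(resolve_forward_reference("'List[int]'") is not None)   # True
print(resolve_forward_reference("'not ('"))                   # None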
@inference_state_method_cache()
|
@inference_state_method_cache()
|
||||||
def infer_return_types(function, arguments):
|
def infer_return_types(function, arguments):
|
||||||
"""
|
"""
|
||||||
Infers the type of a function's return value,
|
Infers the type of a function's return value,
|
||||||
according to type annotations.
|
according to type annotations.
|
||||||
"""
|
"""
|
||||||
context = function.get_default_param_context()
|
all_annotations = py__annotations__(function.tree_node)
|
||||||
all_annotations = resolve_forward_references(
|
|
||||||
context,
|
|
||||||
py__annotations__(function.tree_node),
|
|
||||||
)
|
|
||||||
annotation = all_annotations.get("return", None)
|
annotation = all_annotations.get("return", None)
|
||||||
if annotation is None:
|
if annotation is None:
|
||||||
# If there is no Python 3-type annotation, look for an annotation
|
# If there is no Python 3-type annotation, look for an annotation
|
||||||
@@ -247,10 +217,11 @@ def infer_return_types(function, arguments):
|
|||||||
return NO_VALUES
|
return NO_VALUES
|
||||||
|
|
||||||
return _infer_annotation_string(
|
return _infer_annotation_string(
|
||||||
context,
|
function.get_default_param_context(),
|
||||||
match.group(1).strip()
|
match.group(1).strip()
|
||||||
).execute_annotation()
|
).execute_annotation()
|
||||||
|
|
||||||
|
context = function.get_default_param_context()
|
||||||
unknown_type_vars = find_unknown_type_vars(context, annotation)
|
unknown_type_vars = find_unknown_type_vars(context, annotation)
|
||||||
annotation_values = infer_annotation(context, annotation)
|
annotation_values = infer_annotation(context, annotation)
|
||||||
if not unknown_type_vars:
|
if not unknown_type_vars:
|
||||||
@@ -402,10 +373,6 @@ def find_type_from_comment_hint_for(context, node, name):
|
|||||||
|
|
||||||
|
|
||||||
def find_type_from_comment_hint_with(context, node, name):
|
def find_type_from_comment_hint_with(context, node, name):
|
||||||
if len(node.children) > 4:
|
|
||||||
# In case there are multiple with_items, we do not want a type hint for
|
|
||||||
# now.
|
|
||||||
return []
|
|
||||||
assert len(node.children[1].children) == 3, \
|
assert len(node.children[1].children) == 3, \
|
||||||
"Can only be here when children[1] is 'foo() as f'"
|
"Can only be here when children[1] is 'foo() as f'"
|
||||||
varlist = node.children[1].children[2]
|
varlist = node.children[1].children[2]
|
||||||
|
|||||||
@@ -86,8 +86,6 @@ class StubFilter(ParserTreeFilter):
|
|||||||
# Imports in stub files are only public if they have an "as"
|
# Imports in stub files are only public if they have an "as"
|
||||||
# export.
|
# export.
|
||||||
definition = name.get_definition()
|
definition = name.get_definition()
|
||||||
if definition is None:
|
|
||||||
return False
|
|
||||||
if definition.type in ('import_from', 'import_name'):
|
if definition.type in ('import_from', 'import_name'):
|
||||||
if name.parent.type not in ('import_as_name', 'dotted_as_name'):
|
if name.parent.type not in ('import_as_name', 'dotted_as_name'):
|
||||||
return False
|
return False
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ from pathlib import Path
|
|||||||
|
|
||||||
from jedi import settings
|
from jedi import settings
|
||||||
from jedi.file_io import FileIO
|
from jedi.file_io import FileIO
|
||||||
|
from jedi._compatibility import cast_path
|
||||||
from jedi.parser_utils import get_cached_code_lines
|
from jedi.parser_utils import get_cached_code_lines
|
||||||
from jedi.inference.base_value import ValueSet, NO_VALUES
|
from jedi.inference.base_value import ValueSet, NO_VALUES
|
||||||
from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue
|
from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue
|
||||||
@@ -43,6 +44,7 @@ def _create_stub_map(directory_path_info):
|
|||||||
return
|
return
|
||||||
|
|
||||||
for entry in listed:
|
for entry in listed:
|
||||||
|
entry = cast_path(entry)
|
||||||
path = os.path.join(directory_path_info.path, entry)
|
path = os.path.join(directory_path_info.path, entry)
|
||||||
if os.path.isdir(path):
|
if os.path.isdir(path):
|
||||||
init = os.path.join(path, '__init__.pyi')
|
init = os.path.join(path, '__init__.pyi')
|
||||||
@@ -167,6 +169,7 @@ def _try_to_load_stub(inference_state, import_names, python_value_set,
|
|||||||
if len(import_names) == 1:
|
if len(import_names) == 1:
|
||||||
# foo-stubs
|
# foo-stubs
|
||||||
for p in sys_path:
|
for p in sys_path:
|
||||||
|
p = cast_path(p)
|
||||||
init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
|
init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
|
||||||
m = _try_to_load_stub_from_file(
|
m = _try_to_load_stub_from_file(
|
||||||
inference_state,
|
inference_state,
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ _TYPE_ALIAS_TYPES = {
|
|||||||
'DefaultDict': 'collections.defaultdict',
|
'DefaultDict': 'collections.defaultdict',
|
||||||
'Deque': 'collections.deque',
|
'Deque': 'collections.deque',
|
||||||
}
|
}
|
||||||
_PROXY_TYPES = 'Optional Union ClassVar Annotated'.split()
|
_PROXY_TYPES = 'Optional Union ClassVar'.split()
|
||||||
|
|
||||||
|
|
||||||
class TypingModuleName(NameWrapper):
|
class TypingModuleName(NameWrapper):
|
||||||
@@ -113,7 +113,7 @@ class ProxyWithGenerics(BaseTypingClassWithGenerics):
|
|||||||
elif string_name == 'Type':
|
elif string_name == 'Type':
|
||||||
# The type is actually already given in the index_value
|
# The type is actually already given in the index_value
|
||||||
return self._generics_manager[0]
|
return self._generics_manager[0]
|
||||||
elif string_name in ['ClassVar', 'Annotated']:
|
elif string_name == 'ClassVar':
|
||||||
# For now don't do anything here, ClassVars are always used.
|
# For now don't do anything here, ClassVars are always used.
|
||||||
return self._generics_manager[0].execute_annotation()
|
return self._generics_manager[0].execute_annotation()
|
||||||
|
|
||||||
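The hunk above extends the ClassVar handling to typing.Annotated: for inference purposes, Annotated[X, ...] is just X with attached metadata. That matches the runtime behavior of the alias:

from typing import Annotated

hint = Annotated[int, "units: seconds"]
print(hint.__origin__)      # <class 'int'>, the type inference should use
print(hint.__metadata__)    # ('units: seconds',), ignored for inference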
@@ -294,9 +294,6 @@ class Callable(BaseTypingInstance):
|
|||||||
from jedi.inference.gradual.annotation import infer_return_for_callable
|
from jedi.inference.gradual.annotation import infer_return_for_callable
|
||||||
return infer_return_for_callable(arguments, param_values, result_values)
|
return infer_return_for_callable(arguments, param_values, result_values)
|
||||||
|
|
||||||
def py__get__(self, instance, class_value):
|
|
||||||
return ValueSet([self])
|
|
||||||
|
|
||||||
|
|
||||||
class Tuple(BaseTypingInstance):
|
class Tuple(BaseTypingInstance):
|
||||||
def _is_homogenous(self):
|
def _is_homogenous(self):
|
||||||
@@ -434,9 +431,6 @@ class NewType(Value):
|
|||||||
from jedi.inference.compiled.value import CompiledValueName
|
from jedi.inference.compiled.value import CompiledValueName
|
||||||
return CompiledValueName(self, 'NewType')
|
return CompiledValueName(self, 'NewType')
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
|
||||||
return '<NewType: %s>%s' % (self.tree_node, self._type_value_set)
|
|
||||||
|
|
||||||
|
|
||||||
class CastFunction(ValueWrapper):
|
class CastFunction(ValueWrapper):
|
||||||
@repack_with_argument_clinic('type, object, /')
|
@repack_with_argument_clinic('type, object, /')
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import os
|
|||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
from parso.python import tree
|
from parso.python import tree
|
||||||
|
from parso.tree import search_ancestor
|
||||||
|
|
||||||
from jedi import debug
|
from jedi import debug
|
||||||
from jedi import settings
|
from jedi import settings
|
||||||
@@ -94,7 +95,7 @@ def goto_import(context, tree_name):
|
|||||||
|
|
||||||
|
|
||||||
def _prepare_infer_import(module_context, tree_name):
|
def _prepare_infer_import(module_context, tree_name):
|
||||||
import_node = tree_name.search_ancestor('import_name', 'import_from')
|
import_node = search_ancestor(tree_name, 'import_name', 'import_from')
|
||||||
import_path = import_node.get_path_for_name(tree_name)
|
import_path = import_node.get_path_for_name(tree_name)
|
||||||
from_import_name = None
|
from_import_name = None
|
||||||
try:
|
try:
|
||||||
@@ -421,13 +422,20 @@ def import_module(inference_state, import_names, parent_module_value, sys_path):
|
|||||||
# The module might not be a package.
|
# The module might not be a package.
|
||||||
return NO_VALUES
|
return NO_VALUES
|
||||||
|
|
||||||
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
|
for path in paths:
|
||||||
string=import_names[-1],
|
# At the moment we are only using one path. So this is
|
||||||
path=paths,
|
# not important to be correct.
|
||||||
full_name=module_name,
|
if not isinstance(path, list):
|
||||||
is_global_search=False,
|
path = [path]
|
||||||
)
|
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
|
||||||
if is_pkg is None:
|
string=import_names[-1],
|
||||||
|
path=path,
|
||||||
|
full_name=module_name,
|
||||||
|
is_global_search=False,
|
||||||
|
)
|
||||||
|
if is_pkg is not None:
|
||||||
|
break
|
||||||
|
else:
|
||||||
return NO_VALUES
|
return NO_VALUES
|
||||||
|
|
||||||
if isinstance(file_io_or_ns, ImplicitNSInfo):
|
if isinstance(file_io_or_ns, ImplicitNSInfo):
|
||||||
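import_module now hands the whole list of candidate parent-package paths to get_module_info in a single call, instead of probing path by path as the old loop on the right did. The standard-library finder works the same way when given all search locations at once; a rough analogue:

import importlib.machinery

# path=None means sys.path; a package would pass its full __path__ list.
spec = importlib.machinery.PathFinder.find_spec('json', path=None)
print(spec.origin)                       # .../json/__init__.py
print(spec.submodule_search_locations)   # non-empty: json is a package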
@@ -479,7 +487,7 @@ def _load_builtin_module(inference_state, import_names=None, sys_path=None):
|
|||||||
if sys_path is None:
|
if sys_path is None:
|
||||||
sys_path = inference_state.get_sys_path()
|
sys_path = inference_state.get_sys_path()
|
||||||
if not project._load_unsafe_extensions:
|
if not project._load_unsafe_extensions:
|
||||||
safe_paths = set(project._get_base_sys_path(inference_state))
|
safe_paths = project._get_base_sys_path(inference_state)
|
||||||
sys_path = [p for p in sys_path if p in safe_paths]
|
sys_path = [p for p in sys_path if p in safe_paths]
|
||||||
|
|
||||||
dotted_name = '.'.join(import_names)
|
dotted_name = '.'.join(import_names)
|
||||||
@@ -548,7 +556,7 @@ def load_namespace_from_path(inference_state, folder_io):
|
|||||||
|
|
||||||
|
|
||||||
def follow_error_node_imports_if_possible(context, name):
|
def follow_error_node_imports_if_possible(context, name):
|
||||||
error_node = name.search_ancestor('error_node')
|
error_node = tree.search_ancestor(name, 'error_node')
|
||||||
if error_node is not None:
|
if error_node is not None:
|
||||||
# Get the first command start of a started simple_stmt. The error
|
# Get the first command start of a started simple_stmt. The error
|
||||||
# node is sometimes a small_stmt and sometimes a simple_stmt. Check
|
# node is sometimes a small_stmt and sometimes a simple_stmt. Check
|
||||||
|
|||||||
@@ -2,6 +2,8 @@ from abc import abstractmethod
|
|||||||
from inspect import Parameter
|
from inspect import Parameter
|
||||||
from typing import Optional, Tuple
|
from typing import Optional, Tuple
|
||||||
|
|
||||||
|
from parso.tree import search_ancestor
|
||||||
|
|
||||||
from jedi.parser_utils import find_statement_documentation, clean_scope_docstring
|
from jedi.parser_utils import find_statement_documentation, clean_scope_docstring
|
||||||
from jedi.inference.utils import unite
|
from jedi.inference.utils import unite
|
||||||
from jedi.inference.base_value import ValueSet, NO_VALUES
|
from jedi.inference.base_value import ValueSet, NO_VALUES
|
||||||
@@ -110,7 +112,7 @@ class AbstractTreeName(AbstractNameDefinition):
|
|||||||
self.tree_name = tree_name
|
self.tree_name = tree_name
|
||||||
|
|
||||||
def get_qualified_names(self, include_module_names=False):
|
def get_qualified_names(self, include_module_names=False):
|
||||||
import_node = self.tree_name.search_ancestor('import_name', 'import_from')
|
import_node = search_ancestor(self.tree_name, 'import_name', 'import_from')
|
||||||
# For import nodes we cannot just have names, because it's very unclear
|
# For import nodes we cannot just have names, because it's very unclear
|
||||||
# what they would look like. For now we just ignore them in most cases.
|
# what they would look like. For now we just ignore them in most cases.
|
||||||
# In case of level == 1, it works always, because it's like a submodule
|
# In case of level == 1, it works always, because it's like a submodule
|
||||||
@@ -203,13 +205,15 @@ class AbstractTreeName(AbstractNameDefinition):
|
|||||||
values = infer_call_of_leaf(context, name, cut_own_trailer=True)
|
values = infer_call_of_leaf(context, name, cut_own_trailer=True)
|
||||||
return values.goto(name, name_context=context)
|
return values.goto(name, name_context=context)
|
||||||
else:
|
else:
|
||||||
stmt = name.search_ancestor('expr_stmt', 'lambdef') or name
|
stmt = search_ancestor(
|
||||||
|
name, 'expr_stmt', 'lambdef'
|
||||||
|
) or name
|
||||||
if stmt.type == 'lambdef':
|
if stmt.type == 'lambdef':
|
||||||
stmt = name
|
stmt = name
|
||||||
return context.goto(name, position=stmt.start_pos)
|
return context.goto(name, position=stmt.start_pos)
|
||||||
|
|
||||||
def is_import(self):
|
def is_import(self):
|
||||||
imp = self.tree_name.search_ancestor('import_from', 'import_name')
|
imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
|
||||||
return imp is not None
|
return imp is not None
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@@ -244,7 +248,7 @@ class ValueNameMixin:
|
|||||||
|
|
||||||
def get_defining_qualified_value(self):
|
def get_defining_qualified_value(self):
|
||||||
context = self.parent_context
|
context = self.parent_context
|
||||||
if context is not None and (context.is_module() or context.is_class()):
|
if context.is_module() or context.is_class():
|
||||||
return self.parent_context.get_value() # Might be None
|
return self.parent_context.get_value() # Might be None
|
||||||
return None
|
return None
|
||||||
|
|
||||||
@@ -337,12 +341,6 @@ class TreeNameDefinition(AbstractTreeName):
|
|||||||
def py__doc__(self):
|
def py__doc__(self):
|
||||||
api_type = self.api_type
|
api_type = self.api_type
|
||||||
if api_type in ('function', 'class', 'property'):
|
if api_type in ('function', 'class', 'property'):
|
||||||
if self.parent_context.get_root_context().is_stub():
|
|
||||||
from jedi.inference.gradual.conversion import convert_names
|
|
||||||
names = convert_names([self], prefer_stub_to_compiled=False)
|
|
||||||
if self not in names:
|
|
||||||
return _merge_name_docs(names)
|
|
||||||
|
|
||||||
# Make sure the names are not TreeNameDefinitions anymore.
|
# Make sure the names are not TreeNameDefinitions anymore.
|
||||||
return clean_scope_docstring(self.tree_name.get_definition())
|
return clean_scope_docstring(self.tree_name.get_definition())
|
||||||
|
|
||||||
@@ -410,9 +408,6 @@ class ParamNameInterface(_ParamMixin):
|
|||||||
return 2
|
return 2
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def infer_default(self):
|
|
||||||
return NO_VALUES
|
|
||||||
|
|
||||||
|
|
||||||
class BaseTreeParamName(ParamNameInterface, AbstractTreeName):
|
class BaseTreeParamName(ParamNameInterface, AbstractTreeName):
|
||||||
annotation_node = None
|
annotation_node = None
|
||||||
@@ -447,7 +442,7 @@ class _ActualTreeParamName(BaseTreeParamName):
|
|||||||
self.function_value = function_value
|
self.function_value = function_value
|
||||||
|
|
||||||
def _get_param_node(self):
|
def _get_param_node(self):
|
||||||
return self.tree_name.search_ancestor('param')
|
return search_ancestor(self.tree_name, 'param')
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def annotation_node(self):
|
def annotation_node(self):
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ count the function calls.
|
|||||||
Settings
|
Settings
|
||||||
~~~~~~~~~~
|
~~~~~~~~~~
|
||||||
|
|
||||||
Recursion settings are important if you don't want extremely
|
Recursion settings are important if you don't want extremly
|
||||||
recursive python code to go absolutely crazy.
|
recursive python code to go absolutely crazy.
|
||||||
|
|
||||||
The default values are based on experiments while completing the |jedi| library
|
The default values are based on experiments while completing the |jedi| library
|
||||||
|
|||||||
@@ -180,34 +180,26 @@ def _check_fs(inference_state, file_io, regex):
|
|||||||
return m.as_context()
|
return m.as_context()
|
||||||
|
|
||||||
|
|
||||||
def gitignored_paths(folder_io, file_io):
|
def gitignored_lines(folder_io, file_io):
|
||||||
ignored_paths_abs = set()
|
ignored_paths = set()
|
||||||
ignored_paths_rel = set()
|
ignored_names = set()
|
||||||
|
|
||||||
for l in file_io.read().splitlines():
|
for l in file_io.read().splitlines():
|
||||||
if not l or l.startswith(b'#') or l.startswith(b'!') or b'*' in l:
|
if not l or l.startswith(b'#'):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
p = l.decode('utf-8', 'ignore').rstrip('/')
|
p = l.decode('utf-8', 'ignore')
|
||||||
if '/' in p:
|
if p.startswith('/'):
|
||||||
name = p.lstrip('/')
|
name = p[1:]
|
||||||
ignored_paths_abs.add(os.path.join(folder_io.path, name))
|
if name.endswith(os.path.sep):
|
||||||
|
name = name[:-1]
|
||||||
|
ignored_paths.add(os.path.join(folder_io.path, name))
|
||||||
else:
|
else:
|
||||||
name = p
|
ignored_names.add(p)
|
||||||
ignored_paths_rel.add((folder_io.path, name))
|
return ignored_paths, ignored_names
|
||||||
|
|
||||||
return ignored_paths_abs, ignored_paths_rel
|
|
||||||
|
|
||||||
|
|
||||||
def expand_relative_ignore_paths(folder_io, relative_paths):
|
|
||||||
curr_path = folder_io.path
|
|
||||||
return {os.path.join(curr_path, p[1]) for p in relative_paths if curr_path.startswith(p[0])}
|
|
||||||
|
|
||||||
|
|
||||||
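gitignored_paths above replaces gitignored_lines: entries containing a '/' are anchored to the .gitignore's own folder, bare names are recorded as (root, name) pairs and expanded later by expand_relative_ignore_paths, and comments, negations and wildcard patterns are skipped entirely. The same logic restated as a self-contained sketch, with a plain path string standing in for the folder_io object:

import os

def gitignored_paths(folder_path, lines):
    ignored_abs, ignored_rel = set(), set()
    for l in lines:
        # Skip blanks, comments, negations and anything with a wildcard.
        if not l or l.startswith(b'#') or l.startswith(b'!') or b'*' in l:
            continue
        p = l.decode('utf-8', 'ignore').rstrip('/')
        if '/' in p:
            # Anchored to the folder holding the .gitignore.
            ignored_abs.add(os.path.join(folder_path, p.lstrip('/')))
        else:
            # A bare name: may match anywhere below this folder.
            ignored_rel.add((folder_path, p))
    return ignored_abs, ignored_rel

print(gitignored_paths('/repo', [b'/build', b'venv/', b'docs/api', b'#x', b'*.pyc']))
# ({'/repo/build', '/repo/docs/api'}, {('/repo', 'venv')})  -- set order may vary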
def recurse_find_python_folders_and_files(folder_io, except_paths=()):
|
def recurse_find_python_folders_and_files(folder_io, except_paths=()):
|
||||||
except_paths = set(except_paths)
|
except_paths = set(except_paths)
|
||||||
except_paths_relative = set()
|
|
||||||
|
|
||||||
for root_folder_io, folder_ios, file_ios in folder_io.walk():
|
for root_folder_io, folder_ios, file_ios in folder_io.walk():
|
||||||
# Delete folders that we don't want to iterate over.
|
# Delete folders that we don't want to iterate over.
|
||||||
for file_io in file_ios:
|
for file_io in file_ios:
|
||||||
@@ -217,21 +209,14 @@ def recurse_find_python_folders_and_files(folder_io, except_paths=()):
|
|||||||
yield None, file_io
|
yield None, file_io
|
||||||
|
|
||||||
if path.name == '.gitignore':
|
if path.name == '.gitignore':
|
||||||
ignored_paths_abs, ignored_paths_rel = gitignored_paths(
|
ignored_paths, ignored_names = \
|
||||||
root_folder_io, file_io
|
gitignored_lines(root_folder_io, file_io)
|
||||||
)
|
except_paths |= ignored_paths
|
||||||
except_paths |= ignored_paths_abs
|
|
||||||
except_paths_relative |= ignored_paths_rel
|
|
||||||
|
|
||||||
except_paths_relative_expanded = expand_relative_ignore_paths(
|
|
||||||
root_folder_io, except_paths_relative
|
|
||||||
)
|
|
||||||
|
|
||||||
folder_ios[:] = [
|
folder_ios[:] = [
|
||||||
folder_io
|
folder_io
|
||||||
for folder_io in folder_ios
|
for folder_io in folder_ios
|
||||||
if folder_io.path not in except_paths
|
if folder_io.path not in except_paths
|
||||||
and folder_io.path not in except_paths_relative_expanded
|
|
||||||
and folder_io.get_base_name() not in _IGNORE_FOLDERS
|
and folder_io.get_base_name() not in _IGNORE_FOLDERS
|
||||||
]
|
]
|
||||||
for folder_io in folder_ios:
|
for folder_io in folder_ios:
|
||||||
@@ -297,13 +282,12 @@ def get_module_contexts_containing_name(inference_state, module_contexts, name,
|
|||||||
limit_reduction=limit_reduction)
|
limit_reduction=limit_reduction)
|
||||||
|
|
||||||
|
|
||||||
def search_in_file_ios(inference_state, file_io_iterator, name,
|
def search_in_file_ios(inference_state, file_io_iterator, name, limit_reduction=1):
|
||||||
limit_reduction=1, complete=False):
|
|
||||||
parse_limit = _PARSED_FILE_LIMIT / limit_reduction
|
parse_limit = _PARSED_FILE_LIMIT / limit_reduction
|
||||||
open_limit = _OPENED_FILE_LIMIT / limit_reduction
|
open_limit = _OPENED_FILE_LIMIT / limit_reduction
|
||||||
file_io_count = 0
|
file_io_count = 0
|
||||||
parsed_file_count = 0
|
parsed_file_count = 0
|
||||||
regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b'))
|
regex = re.compile(r'\b' + re.escape(name) + r'\b')
|
||||||
for file_io in file_io_iterator:
|
for file_io in file_io_iterator:
|
||||||
file_io_count += 1
|
file_io_count += 1
|
||||||
m = _check_fs(inference_state, file_io, regex)
|
m = _check_fs(inference_state, file_io, regex)
|
||||||
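The new complete flag in search_in_file_ios drops the trailing word boundary, so that a partially typed name matches as a prefix during completion while reference searches still require a whole word:

import re

name = 'foo'
for complete in (False, True):
    regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b'))
    print(complete, bool(regex.search('foobar')))
# False False  -- whole-word search for references
# True True    -- prefix search while completing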
|
|||||||
@@ -18,14 +18,11 @@ from jedi.inference.helpers import is_big_annoying_library
|
|||||||
|
|
||||||
|
|
||||||
def _iter_nodes_for_param(param_name):
|
def _iter_nodes_for_param(param_name):
|
||||||
|
from parso.python.tree import search_ancestor
|
||||||
from jedi.inference.arguments import TreeArguments
|
from jedi.inference.arguments import TreeArguments
|
||||||
|
|
||||||
execution_context = param_name.parent_context
|
execution_context = param_name.parent_context
|
||||||
# Walk up the parso tree to get the FunctionNode we want. We use the parso
|
function_node = execution_context.tree_node
|
||||||
# tree rather than going via the execution context so that we're agnostic of
|
|
||||||
# the specific scope we're evaluating within (i.e: module or function,
|
|
||||||
# etc.).
|
|
||||||
function_node = param_name.tree_name.search_ancestor('funcdef', 'lambdef')
|
|
||||||
module_node = function_node.get_root_node()
|
module_node = function_node.get_root_node()
|
||||||
start = function_node.children[-1].start_pos
|
start = function_node.children[-1].start_pos
|
||||||
end = function_node.children[-1].end_pos
|
end = function_node.children[-1].end_pos
|
||||||
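The rewritten _iter_nodes_for_param recovers the function node from the parameter's own Name leaf, as the new comment explains, so the lookup no longer depends on which context the evaluation happens in. A small parso check of that lookup, assuming parso >= 0.8:

import parso

module = parso.parse("def f(x):\n    return x\n")
param_name = next(module.iter_funcdefs()).get_params()[0].name

function_node = param_name.search_ancestor('funcdef', 'lambdef')
print(function_node.type, function_node.name.value)   # funcdef f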
@@ -35,7 +32,7 @@ def _iter_nodes_for_param(param_name):
|
|||||||
argument = name.parent
|
argument = name.parent
|
||||||
if argument.type == 'argument' \
|
if argument.type == 'argument' \
|
||||||
and argument.children[0] == '*' * param_name.star_count:
|
and argument.children[0] == '*' * param_name.star_count:
|
||||||
trailer = argument.search_ancestor('trailer')
|
trailer = search_ancestor(argument, 'trailer')
|
||||||
if trailer is not None: # Make sure we're in a function
|
if trailer is not None: # Make sure we're in a function
|
||||||
context = execution_context.create_context(trailer)
|
context = execution_context.create_context(trailer)
|
||||||
if _goes_to_param_name(param_name, context, name):
|
if _goes_to_param_name(param_name, context, name):
|
||||||
|
|||||||
@@ -2,7 +2,6 @@
|
|||||||
Functions inferring the syntax tree.
|
Functions inferring the syntax tree.
|
||||||
"""
|
"""
|
||||||
import copy
|
import copy
|
||||||
import itertools
|
|
||||||
|
|
||||||
from parso.python import tree
|
from parso.python import tree
|
||||||
|
|
||||||
@@ -251,8 +250,6 @@ def _infer_node(context, element):
|
|||||||
return NO_VALUES
|
return NO_VALUES
|
||||||
elif typ == 'namedexpr_test':
|
elif typ == 'namedexpr_test':
|
||||||
return context.infer_node(element.children[2])
|
return context.infer_node(element.children[2])
|
||||||
elif typ == 'star_expr':
|
|
||||||
return NO_VALUES
|
|
||||||
else:
|
else:
|
||||||
return infer_or_test(context, element)
|
return infer_or_test(context, element)
|
||||||
|
|
||||||
@@ -290,7 +287,7 @@ def infer_atom(context, atom):
|
|||||||
state = context.inference_state
|
state = context.inference_state
|
||||||
if atom.type == 'name':
|
if atom.type == 'name':
|
||||||
# This is the first global lookup.
|
# This is the first global lookup.
|
||||||
stmt = atom.search_ancestor('expr_stmt', 'lambdef', 'if_stmt') or atom
|
stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
|
||||||
if stmt.type == 'if_stmt':
|
if stmt.type == 'if_stmt':
|
||||||
if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
|
if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
|
||||||
stmt = atom
|
stmt = atom
|
||||||
@@ -331,8 +328,8 @@ def infer_atom(context, atom):
|
|||||||
c = atom.children
|
c = atom.children
|
||||||
# Parentheses without commas are not tuples.
|
# Parentheses without commas are not tuples.
|
||||||
if c[0] == '(' and not len(c) == 2 \
|
if c[0] == '(' and not len(c) == 2 \
|
||||||
and not (c[1].type == 'testlist_comp'
|
and not(c[1].type == 'testlist_comp'
|
||||||
and len(c[1].children) > 1):
|
and len(c[1].children) > 1):
|
||||||
return context.infer_node(c[1])
|
return context.infer_node(c[1])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -436,7 +433,7 @@ def _infer_expr_stmt(context, stmt, seek_name=None):
|
|||||||
else:
|
else:
|
||||||
operator = copy.copy(first_operator)
|
operator = copy.copy(first_operator)
|
||||||
operator.value = operator.value[:-1]
|
operator.value = operator.value[:-1]
|
||||||
for_stmt = stmt.search_ancestor('for_stmt')
|
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
|
||||||
if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
|
if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
|
||||||
and parser_utils.for_stmt_defines_one_name(for_stmt):
|
and parser_utils.for_stmt_defines_one_name(for_stmt):
|
||||||
# Iterate through result and add the values, that's possible
|
# Iterate through result and add the values, that's possible
|
||||||
@@ -495,10 +492,8 @@ def infer_factor(value_set, operator):
|
|||||||
elif operator == 'not':
|
elif operator == 'not':
|
||||||
b = value.py__bool__()
|
b = value.py__bool__()
|
||||||
if b is None: # Uncertainty.
|
if b is None: # Uncertainty.
|
||||||
yield list(value.inference_state.builtins_module.py__getattribute__('bool')
|
return
|
||||||
.execute_annotation()).pop()
|
yield compiled.create_simple_object(value.inference_state, not b)
|
||||||
else:
|
|
||||||
yield compiled.create_simple_object(value.inference_state, not b)
|
|
||||||
else:
|
else:
|
||||||
yield value
|
yield value
|
||||||
|
|
||||||
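infer_factor's 'not' branch now degrades gracefully: when py__bool__() cannot decide (returns None), it yields the builtin bool type instead of giving up, and only builds a concrete True/False object when the truthiness is known. The three-way logic in miniature, with a plain function standing in for the value objects:

def infer_not(py_bool):
    # py_bool mimics value.py__bool__(): True, False, or None (unknown).
    if py_bool is None:
        return 'bool'              # fall back to the bool type itself
    return repr(not py_bool)       # a concrete True/False object

print([infer_not(v) for v in (True, False, None)])
# ['False', 'True', 'bool']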
@@ -520,20 +515,10 @@ def _literals_to_types(inference_state, result):
|
|||||||
|
|
||||||
def _infer_comparison(context, left_values, operator, right_values):
|
def _infer_comparison(context, left_values, operator, right_values):
|
||||||
state = context.inference_state
|
state = context.inference_state
|
||||||
if isinstance(operator, str):
|
|
||||||
operator_str = operator
|
|
||||||
else:
|
|
||||||
operator_str = str(operator.value)
|
|
||||||
if not left_values or not right_values:
|
if not left_values or not right_values:
|
||||||
# illegal slices e.g. cause left/right_result to be None
|
# illegal slices e.g. cause left/right_result to be None
|
||||||
result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
|
result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
|
||||||
return _literals_to_types(state, result)
|
return _literals_to_types(state, result)
|
||||||
elif operator_str == "|" and all(
|
|
||||||
value.is_class() or value.is_compiled()
|
|
||||||
for value in itertools.chain(left_values, right_values)
|
|
||||||
):
|
|
||||||
# ^^^ A naive hack for PEP 604
|
|
||||||
return ValueSet.from_sets((left_values, right_values))
|
|
||||||
else:
|
else:
|
||||||
# I don't think there's a reasonable chance that a string
|
# I don't think there's a reasonable chance that a string
|
||||||
# operation is still correct, once we pass something like six
|
# operation is still correct, once we pass something like six
|
||||||
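The branch added above on operator_str == "|" is, as its own comment admits, a naive hack for PEP 604: when both operands are classes, X | Y is treated as the union of both value sets rather than as an arithmetic operator. At runtime (Python 3.10+) the same expression builds a types.UnionType:

import types

u = int | str
print(isinstance(u, types.UnionType))   # True
print(u.__args__)                       # (<class 'int'>, <class 'str'>)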
@@ -549,7 +534,7 @@ def _infer_comparison(context, left_values, operator, right_values):
|
|||||||
|
|
||||||
|
|
||||||
def _is_annotation_name(name):
|
def _is_annotation_name(name):
|
||||||
ancestor = name.search_ancestor('param', 'funcdef', 'expr_stmt')
|
ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
|
||||||
if ancestor is None:
|
if ancestor is None:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@@ -649,7 +634,7 @@ def _infer_comparison_part(inference_state, context, left, operator, right):
|
|||||||
_bool_to_value(inference_state, False)
|
_bool_to_value(inference_state, False)
|
||||||
])
|
])
|
||||||
elif str_operator in ('in', 'not in'):
|
elif str_operator in ('in', 'not in'):
|
||||||
return inference_state.builtins_module.py__getattribute__('bool').execute_annotation()
|
return NO_VALUES
|
||||||
|
|
||||||
def check(obj):
|
def check(obj):
|
||||||
"""Checks if a Jedi object is either a float or an int."""
|
"""Checks if a Jedi object is either a float or an int."""
|
||||||
@@ -699,15 +684,8 @@ def tree_name_to_values(inference_state, context, tree_name):
|
|||||||
|
|
||||||
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
|
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
|
||||||
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
|
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
|
||||||
ann_assign = expr_stmt.children[1]
|
|
||||||
if correct_scope:
|
if correct_scope:
|
||||||
found_annotation = True
|
found_annotation = True
|
||||||
if (
|
|
||||||
(ann_assign.children[1].type == 'name')
|
|
||||||
and (ann_assign.children[1].value == tree_name.value)
|
|
||||||
and context.parent_context
|
|
||||||
):
|
|
||||||
context = context.parent_context
|
|
||||||
value_set |= annotation.infer_annotation(
|
value_set |= annotation.infer_annotation(
|
||||||
context, expr_stmt.children[1].children[1]
|
context, expr_stmt.children[1].children[1]
|
||||||
).execute_annotation()
|
).execute_annotation()
|
||||||
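The extra check added to the annassign branch handles an annotation spelled with the same name as the target it annotates: in that case the annotation must resolve in the parent context, or it would find the very definition it sits on. A runnable shape of that case (local annotations are not evaluated at runtime, so this executes fine):

from typing import List

def f():
    # The annotation 'List' must resolve to typing.List from the
    # enclosing module, not to the local name being defined here.
    List: List = []
    return List

print(f())   # []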
@@ -760,13 +738,6 @@ def tree_name_to_values(inference_state, context, tree_name):
|
|||||||
types = infer_expr_stmt(context, node, tree_name)
|
types = infer_expr_stmt(context, node, tree_name)
|
||||||
elif typ == 'with_stmt':
|
elif typ == 'with_stmt':
|
||||||
value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
|
value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
|
||||||
if node.parent.type == 'async_stmt':
|
|
||||||
# In the case of `async with` statements, we need to
|
|
||||||
# first get the coroutine from the `__aenter__` method,
|
|
||||||
# then "unwrap" via the `__await__` method
|
|
||||||
enter_methods = value_managers.py__getattribute__('__aenter__')
|
|
||||||
coro = enter_methods.execute_with_values()
|
|
||||||
return coro.py__await__().py__stop_iteration_returns()
|
|
||||||
enter_methods = value_managers.py__getattribute__('__enter__')
|
enter_methods = value_managers.py__getattribute__('__enter__')
|
||||||
return enter_methods.execute_with_values()
|
return enter_methods.execute_with_values()
|
||||||
elif typ in ('import_from', 'import_name'):
|
elif typ in ('import_from', 'import_name'):
|
||||||
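The with_stmt branch now special-cases `async with`: the bound name comes from awaiting __aenter__(), which the inference models by fetching the coroutine and following py__await__(). A toy context manager showing the runtime protocol being mirrored:

import asyncio

class CM:
    async def __aenter__(self):
        return 42               # the awaited result is what gets bound
    async def __aexit__(self, *exc):
        return False

async def main():
    async with CM() as x:
        print(x)                # 42

asyncio.run(main())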
|
|||||||
@@ -186,6 +186,7 @@ def _get_buildout_script_paths(search_path: Path):
|
|||||||
directory that look like python files.
|
directory that look like python files.
|
||||||
|
|
||||||
:param search_path: absolute path to the module.
|
:param search_path: absolute path to the module.
|
||||||
|
:type search_path: str
|
||||||
"""
|
"""
|
||||||
project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
|
project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
|
||||||
if not project_root:
|
if not project_root:
|
||||||
@@ -204,7 +205,7 @@ def _get_buildout_script_paths(search_path: Path):
|
|||||||
except (UnicodeDecodeError, IOError) as e:
|
except (UnicodeDecodeError, IOError) as e:
|
||||||
# Probably a binary file; permission error or race cond. because
|
# Probably a binary file; permission error or race cond. because
|
||||||
# file got deleted. Ignore it.
|
# file got deleted. Ignore it.
|
||||||
debug.warning(str(e))
|
debug.warning(e)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ settings will stop this process.
|
|||||||
|
|
||||||
It is important to note that:
|
It is important to note that:
|
||||||
|
|
||||||
1. Array modifications work only in the current module.
|
1. Array modfications work only in the current module.
|
||||||
2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
|
2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
|
||||||
"""
|
"""
|
||||||
from jedi import debug
|
from jedi import debug
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
from parso.python import tree
|
||||||
|
|
||||||
from jedi import debug
|
from jedi import debug
|
||||||
from jedi.inference.cache import inference_state_method_cache, CachedMetaClass
|
from jedi.inference.cache import inference_state_method_cache, CachedMetaClass
|
||||||
from jedi.inference import compiled
|
from jedi.inference import compiled
|
||||||
@@ -260,8 +262,8 @@ class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
|||||||
@recursion.execution_recursion_decorator(default=iter([]))
|
@recursion.execution_recursion_decorator(default=iter([]))
|
||||||
def get_yield_lazy_values(self, is_async=False):
|
def get_yield_lazy_values(self, is_async=False):
|
||||||
# TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
|
# TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
|
||||||
for_parents = [(y, y.search_ancestor('for_stmt', 'funcdef',
|
for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
|
||||||
'while_stmt', 'if_stmt'))
|
'while_stmt', 'if_stmt'))
|
||||||
for y in get_yield_exprs(self.inference_state, self.tree_node)]
|
for y in get_yield_exprs(self.inference_state, self.tree_node)]
|
||||||
|
|
||||||
# Calculate if the yields are placed within the same for loop.
|
# Calculate if the yields are placed within the same for loop.
|
||||||
@@ -342,8 +344,7 @@ class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
|||||||
GenericClass(c, TupleGenericManager(generics)) for c in async_classes
|
GenericClass(c, TupleGenericManager(generics)) for c in async_classes
|
||||||
).execute_annotation()
|
).execute_annotation()
|
||||||
else:
|
else:
|
||||||
# If there are annotations, prefer them over anything else.
|
if self.is_generator():
|
||||||
if self.is_generator() and not self.infer_annotations():
|
|
||||||
return ValueSet([iterable.Generator(inference_state, self)])
|
return ValueSet([iterable.Generator(inference_state, self)])
|
||||||
else:
|
else:
|
||||||
return self.get_return_values()
|
return self.get_return_values()
|
||||||
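The generator branch above now defers to return annotations when they exist: a function that yields but is annotated `-> Iterator[int]` is reported via the annotation, and the Generator fallback only applies to unannotated generators. The annotation that wins:

from typing import Iterator

def gen() -> Iterator[int]:
    yield 1

print(gen.__annotations__['return'])   # typing.Iterator[int]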
|
|||||||
@@ -1,5 +1,7 @@
 from abc import abstractproperty

+from parso.tree import search_ancestor
+
 from jedi import debug
 from jedi import settings
 from jedi.inference import compiled
@@ -227,7 +229,7 @@ class _BaseTreeInstance(AbstractInstanceValue):
         new = node
         while True:
             func_node = new
-            new = new.search_ancestor('funcdef', 'classdef')
+            new = search_ancestor(new, 'funcdef', 'classdef')
             if class_context.tree_node is new:
                 func = FunctionValue.from_context(class_context, func_node)
                 bound_method = BoundMethod(self, class_context, func)
@@ -496,7 +498,7 @@ class SelfName(TreeNameDefinition):
         return self._instance

     def infer(self):
-        stmt = self.tree_name.search_ancestor('expr_stmt')
+        stmt = search_ancestor(self.tree_name, 'expr_stmt')
         if stmt is not None:
             if stmt.children[1].type == "annassign":
                 from jedi.inference.gradual.annotation import infer_annotation
@@ -342,8 +342,6 @@ class SequenceLiteralValue(Sequence):
         else:
             with reraise_getitem_errors(TypeError, KeyError, IndexError):
                 node = self.get_tree_entries()[index]
-            if node == ':' or node.type == 'subscript':
-                return NO_VALUES
             return self._defining_context.infer_node(node)

     def py__iter__(self, contextualized_node=None):
@@ -409,6 +407,16 @@ class SequenceLiteralValue(Sequence):
         else:
             return [array_node]

+    def exact_key_items(self):
+        """
+        Returns a generator of tuples like dict.items(), where the key is
+        resolved (as a string) and the values are still lazy values.
+        """
+        for key_node, value in self.get_tree_entries():
+            for key in self._defining_context.infer_node(key_node):
+                if is_string(key):
+                    yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)
+
     def __repr__(self):
         return "<%s of %s>" % (self.__class__.__name__, self.atom)

@@ -464,16 +472,6 @@ class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):

         return ValueSet([FakeList(self.inference_state, lazy_values)])

-    def exact_key_items(self):
-        """
-        Returns a generator of tuples like dict.items(), where the key is
-        resolved (as a string) and the values are still lazy values.
-        """
-        for key_node, value in self.get_tree_entries():
-            for key in self._defining_context.infer_node(key_node):
-                if is_string(key):
-                    yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)
-
     def _dict_values(self):
         return ValueSet.from_sets(
             self._defining_context.infer_node(v)
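``exact_key_items`` (moved between classes in the hunk above) is what lets Jedi resolve a literal string key in a dict display, so the inferred value type drives completion. A quick way to observe the effect from the public API (a sketch; method names per recent jedi releases):

import jedi

source = "d = {'a': 1, 'b': 'x'}\nd['b']."
# Completing after d['b']. should offer str methods, because the value
# behind the literal key 'b' is a string.
print([c.name for c in jedi.Script(code=source).complete(2, 7)][:5])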
@@ -36,10 +36,6 @@ py__doc__() Returns the docstring for a value.
 ====================================== ========================================

 """
-from __future__ import annotations
-
-from typing import List, Optional, Tuple
-
 from jedi import debug
 from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted, \
     function_is_property
@@ -51,15 +47,11 @@ from jedi.inference.filters import ParserTreeFilter
 from jedi.inference.names import TreeNameDefinition, ValueName
 from jedi.inference.arguments import unpack_arglist, ValuesArguments
 from jedi.inference.base_value import ValueSet, iterator_to_value_set, \
-    NO_VALUES, ValueWrapper
+    NO_VALUES
 from jedi.inference.context import ClassContext
-from jedi.inference.value.function import FunctionAndClassBase, FunctionMixin
-from jedi.inference.value.decorator import Decoratee
+from jedi.inference.value.function import FunctionAndClassBase
 from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
 from jedi.plugins import plugin_manager
-from inspect import Parameter
-from jedi.inference.names import BaseTreeParamName
-from jedi.inference.signature import AbstractSignature


 class ClassName(TreeNameDefinition):
@@ -86,8 +78,6 @@ class ClassName(TreeNameDefinition):
         type_ = super().api_type
         if type_ == 'function':
             definition = self.tree_name.get_definition()
-            if definition is None:
-                return type_
             if function_is_property(definition):
                 # This essentially checks if there is an @property before
                 # the function. @property could be something different, but
@@ -124,10 +114,25 @@ class ClassFilter(ParserTreeFilter):
         while node is not None:
             if node == self._parser_scope or node == self.parent_context:
                 return True
-            node = get_cached_parent_scope(self._parso_cache_node, node)
+            node = get_cached_parent_scope(self._used_names, node)
         return False

     def _access_possible(self, name):
+        # Filter for ClassVar variables
+        # TODO this is not properly done, yet. It just checks for the string
+        # ClassVar in the annotation, which can be quite imprecise. If we
+        # wanted to do this correct, we would have to infer the ClassVar.
+        if not self._is_instance:
+            expr_stmt = name.get_definition()
+            if expr_stmt is not None and expr_stmt.type == 'expr_stmt':
+                annassign = expr_stmt.children[1]
+                if annassign.type == 'annassign':
+                    # If there is an =, the variable is obviously also
+                    # defined on the class.
+                    if 'ClassVar' not in annassign.children[1].get_code() \
+                            and '=' not in annassign.children:
+                        return False
+
         # Filter for name mangling of private variables like __foo
         return not name.value.startswith('__') or name.value.endswith('__') \
             or self._equals_origin_scope()
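The ClassVar check above is deliberately textual: it inspects the raw code of the annotation child of an ``annassign`` node instead of inferring it. A small sketch of the parso node shapes involved (assuming parso >= 0.8):

import parso

module = parso.parse("x: ClassVar[int] = 1\n")
expr_stmt = module.children[0].children[0]  # simple_stmt -> expr_stmt
annassign = expr_stmt.children[1]
print(annassign.type)                    # 'annassign'
print(annassign.children[1].get_code())  # ' ClassVar[int]' (the annotation)
print('=' in annassign.children)         # True, so it is also a class attribute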
@@ -137,65 +142,6 @@ class ClassFilter(ParserTreeFilter):
         return [name for name in names if self._access_possible(name)]


-def init_param_value(arg_nodes) -> Optional[bool]:
-    """
-    Returns:
-
-    - ``True`` if ``@dataclass(init=True)``
-    - ``False`` if ``@dataclass(init=False)``
-    - ``None`` if not specified ``@dataclass()``
-    """
-    for arg_node in arg_nodes:
-        if (
-            arg_node.type == "argument"
-            and arg_node.children[0].value == "init"
-        ):
-            if arg_node.children[2].value == "False":
-                return False
-            elif arg_node.children[2].value == "True":
-                return True
-
-    return None
-
-
-def get_dataclass_param_names(cls) -> List[DataclassParamName]:
-    """
-    ``cls`` is a :class:`ClassMixin`. The type is only documented as mypy would
-    complain that some fields are missing.
-
-    .. code:: python
-
-        @dataclass
-        class A:
-            a: int
-            b: str = "toto"
-
-    For the previous example, the param names would be ``a`` and ``b``.
-    """
-    param_names = []
-    filter_ = cls.as_context().get_global_filter()
-    for name in sorted(filter_.values(), key=lambda name: name.start_pos):
-        d = name.tree_name.get_definition()
-        annassign = d.children[1]
-        if d.type == 'expr_stmt' and annassign.type == 'annassign':
-            node = annassign.children[1]
-            if node.type == "atom_expr" and node.children[0].value == "ClassVar":
-                continue
-
-            if len(annassign.children) < 4:
-                default = None
-            else:
-                default = annassign.children[3]
-
-            param_names.append(DataclassParamName(
-                parent_context=cls.parent_context,
-                tree_name=name.tree_name,
-                annotation_node=annassign.children[1],
-                default_node=default,
-            ))
-    return param_names
-
-
 class ClassMixin:
     def is_class(self):
         return True
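What the deleted helper reconstructs is exactly the ``__init__`` that the real ``dataclasses`` module generates from annotated class attributes. A minimal illustration of those semantics in plain Python:

import inspect
from dataclasses import dataclass

@dataclass
class A:
    a: int
    b: str = "toto"

# The generated signature mirrors the annotated assignments, in source order:
print(inspect.signature(A.__init__))  # (self, a: int, b: str = 'toto')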
@@ -288,73 +234,6 @@ class ClassMixin:
             assert x is not None
             yield x

-    def _has_dataclass_transform_metaclasses(self) -> Tuple[bool, Optional[bool]]:
-        for meta in self.get_metaclasses():  # type: ignore[attr-defined]
-            if (
-                isinstance(meta, Decoratee)
-                # Internal leakage :|
-                and isinstance(meta._wrapped_value, DataclassTransformer)
-            ):
-                return True, meta._wrapped_value.init_mode_from_new()
-
-        return False, None
-
-    def _get_dataclass_transform_signatures(self) -> List[DataclassSignature]:
-        """
-        Returns: A non-empty list if the class has dataclass semantics else an
-        empty list.
-
-        The dataclass-like semantics will be assumed for any class that directly
-        or indirectly derives from the decorated class or uses the decorated
-        class as a metaclass.
-        """
-        param_names = []
-        is_dataclass_transform = False
-        default_init_mode: Optional[bool] = None
-        for cls in reversed(list(self.py__mro__())):
-            if not is_dataclass_transform:
-
-                # If dataclass_transform is applied to a class, dataclass-like semantics
-                # will be assumed for any class that directly or indirectly derives from
-                # the decorated class or uses the decorated class as a metaclass.
-                if (
-                    isinstance(cls, DataclassTransformer)
-                    and cls.init_mode_from_init_subclass
-                ):
-                    is_dataclass_transform = True
-                    default_init_mode = cls.init_mode_from_init_subclass
-
-                elif (
-                    # Some object like CompiledValues would not be compatible
-                    isinstance(cls, ClassMixin)
-                ):
-                    is_dataclass_transform, default_init_mode = (
-                        cls._has_dataclass_transform_metaclasses()
-                    )
-
-                # Attributes on the decorated class and its base classes are not
-                # considered to be fields.
-                if is_dataclass_transform:
-                    continue
-
-            # All inherited classes behave like dataclass semantics
-            if (
-                is_dataclass_transform
-                and isinstance(cls, ClassValue)
-                and (
-                    cls.init_param_mode()
-                    or (cls.init_param_mode() is None and default_init_mode)
-                )
-            ):
-                param_names.extend(
-                    get_dataclass_param_names(cls)
-                )
-
-        if is_dataclass_transform:
-            return [DataclassSignature(cls, param_names)]
-        else:
-            return []
-
     def get_signatures(self):
         # Since calling staticmethod without a function is illegal, the Jedi
         # plugin doesn't return anything. Therefore call directly and get what
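The deleted machinery models PEP 681 (``dataclass_transform``). A minimal sketch of the pattern it recognises, using ``typing_extensions`` for Python < 3.11 (the class names below are illustrative, not from jedi):

from typing_extensions import dataclass_transform

@dataclass_transform()
class ModelBase:
    def __init_subclass__(cls, init: bool = True, **kwargs):
        super().__init_subclass__(**kwargs)

class Point(ModelBase):               # gains dataclass-like semantics
    x: int
    y: int = 0

class Frozen(ModelBase, init=False):  # 'init=False' in the class header is
    name: str                         # what init_param_mode() inspected

# A PEP 681-aware tool infers Point(x: int, y: int = 0) here; that is what
# _get_dataclass_transform_signatures() computed by walking the MRO.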
@@ -366,12 +245,7 @@ class ClassMixin:
             return sigs
         args = ValuesArguments([])
         init_funcs = self.py__call__(args).py__getattribute__('__init__')
-        dataclass_sigs = self._get_dataclass_transform_signatures()
-        if dataclass_sigs:
-            return dataclass_sigs
-        else:
-            return [sig.bind(self) for sig in init_funcs.get_signatures()]
+        return [sig.bind(self) for sig in init_funcs.get_signatures()]

     def _as_context(self):
         return ClassContext(self)
@@ -458,158 +332,6 @@ class ClassMixin:
         return ValueSet({self})


-class DataclassParamName(BaseTreeParamName):
-    """
-    Represent a field declaration on a class with dataclass semantics.
-    """
-
-    def __init__(self, parent_context, tree_name, annotation_node, default_node):
-        super().__init__(parent_context, tree_name)
-        self.annotation_node = annotation_node
-        self.default_node = default_node
-
-    def get_kind(self):
-        return Parameter.POSITIONAL_OR_KEYWORD
-
-    def infer(self):
-        if self.annotation_node is None:
-            return NO_VALUES
-        else:
-            return self.parent_context.infer_node(self.annotation_node)
-
-
-class DataclassSignature(AbstractSignature):
-    """
-    It represents the ``__init__`` signature of a class with dataclass semantics.
-
-    .. code:: python
-
-    """
-    def __init__(self, value, param_names):
-        super().__init__(value)
-        self._param_names = param_names
-
-    def get_param_names(self, resolve_stars=False):
-        return self._param_names
-
-
-class DataclassDecorator(ValueWrapper, FunctionMixin):
-    """
-    A dataclass(-like) decorator with custom parameters.
-
-    .. code:: python
-
-        @dataclass(init=True)  # this
-        class A: ...
-
-        @dataclass_transform
-        def create_model(*, init=False): pass
-
-        @create_model(init=False)  # or this
-        class B: ...
-    """
-
-    def __init__(self, function, arguments, default_init: bool = True):
-        """
-        Args:
-            function: Decoratee | function
-            arguments: The parameters to the dataclass function decorator
-            default_init: Boolean to indicate the default init value
-        """
-        super().__init__(function)
-        argument_init = self._init_param_value(arguments)
-        self.init_param_mode = (
-            argument_init if argument_init is not None else default_init
-        )
-
-    def _init_param_value(self, arguments) -> Optional[bool]:
-        if not arguments.argument_node:
-            return None
-
-        arg_nodes = (
-            arguments.argument_node.children
-            if arguments.argument_node.type == "arglist"
-            else [arguments.argument_node]
-        )
-
-        return init_param_value(arg_nodes)
-
-
-class DataclassTransformer(ValueWrapper, ClassMixin):
-    """
-    A class decorated with the ``dataclass_transform`` decorator. dataclass-like
-    semantics will be assumed for any class that directly or indirectly derives
-    from the decorated class or uses the decorated class as a metaclass.
-    Attributes on the decorated class and its base classes are not considered to
-    be fields.
-    """
-    def __init__(self, wrapped_value):
-        super().__init__(wrapped_value)
-
-    def init_mode_from_new(self) -> bool:
-        """Default value if missing is ``True``"""
-        new_methods = self._wrapped_value.py__getattribute__("__new__")
-
-        if not new_methods:
-            return True
-
-        new_method = list(new_methods)[0]
-
-        for param in new_method.get_param_names():
-            if (
-                param.string_name == "init"
-                and param.default_node
-                and param.default_node.type == "keyword"
-            ):
-                if param.default_node.value == "False":
-                    return False
-                elif param.default_node.value == "True":
-                    return True
-
-        return True
-
-    @property
-    def init_mode_from_init_subclass(self) -> Optional[bool]:
-        # def __init_subclass__(cls) -> None: ... is hardcoded in the typeshed
-        # so the extra parameters can not be inferred.
-        return True
-
-
-class DataclassWrapper(ValueWrapper, ClassMixin):
-    """
-    A class with dataclass semantics from a decorator. The init parameters are
-    only from the current class and parent classes decorated where the ``init``
-    parameter was ``True``.
-
-    .. code:: python
-
-        @dataclass
-        class A: ...  # this
-
-        @dataclass_transform
-        def create_model(): pass
-
-        @create_model()
-        class B: ...  # or this
-    """
-
-    def __init__(
-        self, wrapped_value, should_generate_init: bool
-    ):
-        super().__init__(wrapped_value)
-        self.should_generate_init = should_generate_init
-
-    def get_signatures(self):
-        param_names = []
-        for cls in reversed(list(self.py__mro__())):
-            if (
-                isinstance(cls, DataclassWrapper)
-                and cls.should_generate_init
-            ):
-                param_names.extend(get_dataclass_param_names(cls))
-        return [DataclassSignature(cls, param_names)]
-
-
 class ClassValue(ClassMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
     api_type = 'class'

@@ -676,19 +398,6 @@ class ClassValue(ClassMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
                 return values
         return NO_VALUES

-    def init_param_mode(self) -> Optional[bool]:
-        """
-        It returns ``True`` if ``class X(init=False):`` else ``False``.
-        """
-        bases_arguments = self._get_bases_arguments()
-
-        if bases_arguments.argument_node.type != "arglist":
-            # If it is not inheriting from the base model and having
-            # extra parameters, then init behavior is not changed.
-            return None
-
-        return init_param_value(bases_arguments.argument_node.children)
-
     @plugin_manager.decorate()
     def get_metaclass_signatures(self, metaclasses):
         return []
@@ -64,7 +64,7 @@ class ModuleMixin(SubModuleDictMixin):
                 parent_context=self.as_context(),
                 origin_scope=origin_scope
             ),
-            GlobalNameFilter(self.as_context()),
+            GlobalNameFilter(self.as_context(), self.tree_node),
         )
         yield DictFilter(self.sub_modules_dict())
         yield DictFilter(self._module_attributes_dict())
@@ -80,7 +80,7 @@ class ModuleMixin(SubModuleDictMixin):
     def is_stub(self):
         return False

-    @property
+    @property  # type: ignore[misc]
     @inference_state_method_cache()
     def name(self):
         return self._module_name_class(self, self.string_names[-1])
@@ -138,7 +138,7 @@ class ModuleValue(ModuleMixin, TreeValue):
     api_type = 'module'

     def __init__(self, inference_state, module_node, code_lines, file_io=None,
-                 string_names=None, is_package=False) -> None:
+                 string_names=None, is_package=False):
         super().__init__(
             inference_state,
             parent_context=None,
@@ -148,8 +148,8 @@ class ModuleValue(ModuleMixin, TreeValue):
         if file_io is None:
             self._path: Optional[Path] = None
         else:
-            self._path = file_io.path
-        self.string_names: Optional[tuple[str, ...]] = string_names
+            self._path = Path(file_io.path)
+        self.string_names = string_names  # Optional[Tuple[str, ...]]
         self.code_lines = code_lines
         self._is_package = is_package

@@ -1,6 +1,3 @@
-from pathlib import Path
-from typing import Optional
-
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.filters import DictFilter
 from jedi.inference.names import ValueNameMixin, AbstractNameDefinition
@@ -38,13 +35,13 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
     def get_qualified_names(self):
         return ()

-    @property
+    @property  # type: ignore[misc]
     @inference_state_method_cache()
     def name(self):
         string_name = self.py__package__()[-1]
         return ImplicitNSName(self, string_name)

-    def py__file__(self) -> Optional[Path]:
+    def py__file__(self):
         return None

     def py__package__(self):
@@ -216,14 +216,11 @@ def is_scope(node):
 def _get_parent_scope_cache(func):
     cache = WeakKeyDictionary()

-    def wrapper(parso_cache_node, node, include_flows=False):
-        if parso_cache_node is None:
-            return func(node, include_flows)
-
+    def wrapper(used_names, node, include_flows=False):
         try:
-            for_module = cache[parso_cache_node]
+            for_module = cache[used_names]
         except KeyError:
-            for_module = cache[parso_cache_node] = {}
+            for_module = cache[used_names] = {}

         try:
             return for_module[node]
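The cache key matters because the outer mapping is a ``WeakKeyDictionary``: entries live exactly as long as the key object (parso's cache node on the master side) stays alive. A self-contained sketch of that behaviour:

from weakref import WeakKeyDictionary

class CacheNode:  # stand-in for parso's per-module cache entry (hypothetical)
    pass

cache = WeakKeyDictionary()
node = CacheNode()
cache[node] = {'parsed': True}
print(len(cache))  # 1
del node           # dropping the key drops the cached entry as well
print(len(cache))  # 0 (on CPython, collection is immediate)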
@@ -273,18 +270,7 @@ def get_cached_code_lines(grammar, path):
     Basically access the cached code lines in parso. This is not the nicest way
     to do this, but we avoid splitting all the lines again.
     """
-    return get_parso_cache_node(grammar, path).lines
-
-
-def get_parso_cache_node(grammar, path):
-    """
-    This is of course not public. But as long as I control parso, this
-    shouldn't be a problem. ~ Dave
-
-    The reason for this is mostly caching. This is obviously also a sign of a
-    broken caching architecture.
-    """
-    return parser_cache[grammar._hashed][path]
+    return parser_cache[grammar._hashed][path].lines


 def cut_value_at_position(leaf, position):
@@ -320,7 +306,7 @@ def expr_is_dotted(node):
     return node.type == 'name'


-def _function_is_x_method(decorator_checker):
+def _function_is_x_method(*method_names):
     def wrapper(function_node):
         """
         This is a heuristic. It will not hold ALL the times, but it will be
@@ -330,16 +316,12 @@ def _function_is_x_method(decorator_checker):
         """
         for decorator in function_node.get_decorators():
             dotted_name = decorator.children[1]
-            if decorator_checker(dotted_name.get_code()):
+            if dotted_name.get_code() in method_names:
                 return True
         return False
     return wrapper


-function_is_staticmethod = _function_is_x_method(lambda m: m == "staticmethod")
-function_is_classmethod = _function_is_x_method(lambda m: m == "classmethod")
-function_is_property = _function_is_x_method(
-    lambda m: m == "property"
-    or m == "cached_property"
-    or (m.endswith(".setter"))
-)
+function_is_staticmethod = _function_is_x_method('staticmethod')
+function_is_classmethod = _function_is_x_method('classmethod')
+function_is_property = _function_is_x_method('property', 'cached_property')
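The change from a name tuple to a predicate is what lets the master branch treat ``@foo.setter`` like ``@property``. A simplified sketch of the two strategies, operating on plain strings instead of parso nodes:

def make_checker_old(*method_names):
    return lambda code: code in method_names

def make_checker_new(predicate):
    return predicate

is_property_old = make_checker_old('property', 'cached_property')
is_property_new = make_checker_new(
    lambda m: m in ('property', 'cached_property') or m.endswith('.setter'))

print(is_property_old('foo.setter'))  # False: tuple matching misses setters
print(is_property_new('foo.setter'))  # True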
@@ -1,9 +1,8 @@
-import sys
-from typing import List
 from pathlib import Path

+from parso.tree import search_ancestor
 from jedi.inference.cache import inference_state_method_cache
-from jedi.inference.imports import goto_import, load_module_from_path
+from jedi.inference.imports import load_module_from_path
 from jedi.inference.filters import ParserTreeFilter
 from jedi.inference.base_value import NO_VALUES, ValueSet
 from jedi.inference.helpers import infer_call_of_leaf
@@ -32,15 +31,7 @@ def execute(callback):
 def infer_anonymous_param(func):
     def get_returns(value):
         if value.tree_node.annotation is not None:
-            result = value.execute_with_values()
-            if any(v.name.get_qualified_names(include_module_names=True)
-                   == ('typing', 'Generator')
-                   for v in result):
-                return ValueSet.from_sets(
-                    v.py__getattribute__('__next__').execute_annotation()
-                    for v in result
-                )
-            return result
+            return value.execute_with_values()

         # In pytest we need to differentiate between generators and normal
         # returns.
@@ -52,9 +43,6 @@ def infer_anonymous_param(func):
         return function_context.get_return_values()

     def wrapper(param_name):
-        # parameters with an annotation do not need special handling
-        if param_name.annotation_node:
-            return func(param_name)
         is_pytest_param, param_name_is_function_name = \
             _is_a_pytest_param_and_inherited(param_name)
         if is_pytest_param:
@@ -119,7 +107,7 @@ def _is_a_pytest_param_and_inherited(param_name):

     This is a heuristic and will work in most cases.
     """
-    funcdef = param_name.tree_name.search_ancestor('funcdef')
+    funcdef = search_ancestor(param_name.tree_name, 'funcdef')
     if funcdef is None:  # A lambda
         return False, False
     decorators = funcdef.get_decorators()
@@ -132,36 +120,6 @@ def _is_pytest_func(func_name, decorator_nodes):
         or any('fixture' in n.get_code() for n in decorator_nodes)


-def _find_pytest_plugin_modules() -> List[List[str]]:
-    """
-    Finds pytest plugin modules hooked by setuptools entry points
-
-    See https://docs.pytest.org/en/stable/how-to/writing_plugins.html#setuptools-entry-points
-    """
-    if sys.version_info >= (3, 8):
-        from importlib.metadata import entry_points
-
-        if sys.version_info >= (3, 10):
-            pytest_entry_points = entry_points(group="pytest11")
-        else:
-            pytest_entry_points = entry_points().get("pytest11", ())
-
-        if sys.version_info >= (3, 9):
-            return [ep.module.split(".") for ep in pytest_entry_points]
-        else:
-            # Python 3.8 doesn't have `EntryPoint.module`. Implement equivalent
-            # to what Python 3.9 does (with additional None check to placate `mypy`)
-            matches = [
-                ep.pattern.match(ep.value)
-                for ep in pytest_entry_points
-            ]
-            return [x.group('module').split(".") for x in matches if x]
-
-    else:
-        from pkg_resources import iter_entry_points
-        return [ep.module_name.split(".") for ep in iter_entry_points(group="pytest11")]
-
-
 @inference_state_method_cache()
 def _iter_pytest_modules(module_context, skip_own_module=False):
     if not skip_own_module:
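The deleted helper discovers third-party pytest plugins through the ``pytest11`` entry-point group. On Python >= 3.10 the core of it reduces to a few lines (a sketch of the same lookup):

from importlib.metadata import entry_points

# Each installed plugin advertises a module path, e.g. 'pytest_cov.plugin'.
for ep in entry_points(group="pytest11"):
    print(ep.name, "->", ep.module.split("."))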
@@ -171,74 +129,32 @@ def _iter_pytest_modules(module_context, skip_own_module=False):
     if file_io is not None:
         folder = file_io.get_parent_folder()
         sys_path = module_context.inference_state.get_sys_path()
-
-        # prevent an infinite loop when reaching the root of the current drive
-        last_folder = None
-
         while any(folder.path.startswith(p) for p in sys_path):
             file_io = folder.get_file_io('conftest.py')
             if Path(file_io.path) != module_context.py__file__():
                 try:
                     m = load_module_from_path(module_context.inference_state, file_io)
-                    conftest_module = m.as_context()
-                    yield conftest_module
-
-                    plugins_list = m.tree_node.get_used_names().get("pytest_plugins")
-                    if plugins_list:
-                        name = conftest_module.create_name(plugins_list[0])
-                        yield from _load_pytest_plugins(module_context, name)
+                    yield m.as_context()
                 except FileNotFoundError:
                     pass
             folder = folder.get_parent_folder()

-            # prevent an infinite for loop if the same parent folder is return twice
-            if last_folder is not None and folder.path == last_folder.path:
-                break
-            last_folder = folder  # keep track of the last found parent name
-
-    for names in _PYTEST_FIXTURE_MODULES + _find_pytest_plugin_modules():
+    for names in _PYTEST_FIXTURE_MODULES:
         for module_value in module_context.inference_state.import_module(names):
             yield module_value.as_context()


-def _load_pytest_plugins(module_context, name):
-    from jedi.inference.helpers import get_str_or_none
-
-    for inferred in name.infer():
-        for seq_value in inferred.py__iter__():
-            for value in seq_value.infer():
-                fq_name = get_str_or_none(value)
-                if fq_name:
-                    names = fq_name.split(".")
-                    for module_value in module_context.inference_state.import_module(names):
-                        yield module_value.as_context()
-
-
 class FixtureFilter(ParserTreeFilter):
     def _filter(self, names):
         for name in super()._filter(names):
-            # look for fixture definitions of imported names
-            if name.parent.type == "import_from":
-                imported_names = goto_import(self.parent_context, name)
-                if any(
-                    self._is_fixture(iname.parent_context, iname.tree_name)
-                    for iname in imported_names
-                    # discard imports of whole modules, that have no tree_name
-                    if iname.tree_name
-                ):
+            funcdef = name.parent
+            # Class fixtures are not supported
+            if funcdef.type == 'funcdef':
+                decorated = funcdef.parent
+                if decorated.type == 'decorated' and self._is_fixture(decorated):
                     yield name

-            elif self._is_fixture(self.parent_context, name):
-                yield name
-
-    def _is_fixture(self, context, name):
-        funcdef = name.parent
-        # Class fixtures are not supported
-        if funcdef.type != "funcdef":
-            return False
-        decorated = funcdef.parent
-        if decorated.type != "decorated":
-            return False
+    def _is_fixture(self, decorated):
         decorators = decorated.children[0]
         if decorators.type == 'decorators':
             decorators = decorators.children
@@ -255,12 +171,11 @@ class FixtureFilter(ParserTreeFilter):
                 last_leaf = last_trailer.get_last_leaf()
                 if last_leaf == ')':
                     values = infer_call_of_leaf(
-                        context, last_leaf, cut_own_trailer=True
-                    )
+                        self.parent_context, last_leaf, cut_own_trailer=True)
                 else:
-                    values = context.infer_node(dotted_name)
+                    values = self.parent_context.infer_node(dotted_name)
             else:
-                values = context.infer_node(dotted_name)
+                values = self.parent_context.infer_node(dotted_name)
             for value in values:
                 if value.name.get_qualified_names(include_module_names=True) \
                         == ('_pytest', 'fixtures', 'fixture'):
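For reference, the shapes of fixture definitions the two versions of the filter have to recognise in a ``conftest.py`` (a sketch; ``fixture_module`` is a hypothetical module name):

import pytest
from fixture_module import shared_fixture  # imported fixture: only master resolves this

pytest_plugins = ["fixture_module"]  # plugin list: master also loads these modules

@pytest.fixture
def local_fixture():  # plain decorated function: both versions resolve this
    return 42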
@@ -11,6 +11,7 @@ compiled module that returns the types for C-builtins.
 """
 import parso
 import os
+from inspect import Parameter

 from jedi import debug
 from jedi.inference.utils import safe_property
@@ -24,20 +25,15 @@ from jedi.inference.value.instance import \
 from jedi.inference.base_value import ContextualizedNode, \
     NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper
 from jedi.inference.value import ClassValue, ModuleValue
-from jedi.inference.value.decorator import Decoratee
-from jedi.inference.value.klass import (
-    DataclassWrapper,
-    DataclassDecorator,
-    DataclassTransformer,
-)
+from jedi.inference.value.klass import ClassMixin
 from jedi.inference.value.function import FunctionMixin
 from jedi.inference.value import iterable
 from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, \
     LazyKnownValues
-from jedi.inference.names import ValueName
+from jedi.inference.names import ValueName, BaseTreeParamName
 from jedi.inference.filters import AttributeOverwrite, publish_method, \
     ParserTreeFilter, DictFilter
-from jedi.inference.signature import SignatureWrapper
+from jedi.inference.signature import AbstractSignature, SignatureWrapper


 # Copied from Python 3.6's stdlib.
@@ -595,103 +591,65 @@ def _random_choice(sequences):


 def _dataclass(value, arguments, callback):
-    """
-    Decorator entry points for dataclass.
-
-    1. dataclass decorator declaration with parameters
-    2. dataclass semantics on a class from a dataclass(-like) decorator
-    """
     for c in _follow_param(value.inference_state, arguments, 0):
         if c.is_class():
-            # Declare dataclass semantics on a class from a dataclass decorator
-            should_generate_init = (
-                # Customized decorator, init may be disabled
-                value.init_param_mode
-                if isinstance(value, DataclassDecorator)
-                # Bare dataclass decorator, always with init mode
-                else True
-            )
-            return ValueSet([DataclassWrapper(c, should_generate_init)])
+            return ValueSet([DataclassWrapper(c)])
         else:
-            # @dataclass(init=False)
-            # dataclass decorator customization
-            return ValueSet(
-                [
-                    DataclassDecorator(
-                        value,
-                        arguments=arguments,
-                        default_init=True,
-                    )
-                ]
-            )
-
-    return NO_VALUES
-
-
-def _dataclass_transform(value, arguments, callback):
-    """
-    Decorator entry points for dataclass_transform.
-
-    1. dataclass-like decorator instantiation from a dataclass_transform decorator
-    2. dataclass_transform decorator declaration with parameters
-    3. dataclass-like decorator declaration with parameters
-    4. dataclass-like semantics on a class from a dataclass-like decorator
-    """
-    for c in _follow_param(value.inference_state, arguments, 0):
-        if c.is_class():
-            is_dataclass_transform = (
-                value.name.string_name == "dataclass_transform"
-                # The decorator function from dataclass_transform acting as the
-                # dataclass decorator.
-                and not isinstance(value, Decoratee)
-                # The decorator function from dataclass_transform acting as the
-                # dataclass decorator with customized parameters
-                and not isinstance(value, DataclassDecorator)
-            )
-
-            if is_dataclass_transform:
-                # Declare base class
-                return ValueSet([DataclassTransformer(c)])
-            else:
-                # Declare dataclass-like semantics on a class from a
-                # dataclass-like decorator
-                should_generate_init = value.init_param_mode
-                return ValueSet([DataclassWrapper(c, should_generate_init)])
-        elif c.is_function():
-            # dataclass-like decorator instantiation:
-            # @dataclass_transform
-            # def create_model()
-            return ValueSet(
-                [
-                    DataclassDecorator(
-                        value,
-                        arguments=arguments,
-                        default_init=True,
-                    )
-                ]
-            )
-        elif (
-            # @dataclass_transform
-            # def create_model(): pass
-            # @create_model(init=...)
-            isinstance(value, Decoratee)
-        ):
-            # dataclass (or like) decorator customization
-            return ValueSet(
-                [
-                    DataclassDecorator(
-                        value,
-                        arguments=arguments,
-                        default_init=value._wrapped_value.init_param_mode,
-                    )
-                ]
-            )
-        else:
-            # dataclass_transform decorator with parameters; nothing impactful
             return ValueSet([value])
     return NO_VALUES


+class DataclassWrapper(ValueWrapper, ClassMixin):
+    def get_signatures(self):
+        param_names = []
+        for cls in reversed(list(self.py__mro__())):
+            if isinstance(cls, DataclassWrapper):
+                filter_ = cls.as_context().get_global_filter()
+                # .values ordering is not guaranteed, at least not in
+                # Python < 3.6, when dicts where not ordered, which is an
+                # implementation detail anyway.
+                for name in sorted(filter_.values(), key=lambda name: name.start_pos):
+                    d = name.tree_name.get_definition()
+                    annassign = d.children[1]
+                    if d.type == 'expr_stmt' and annassign.type == 'annassign':
+                        if len(annassign.children) < 4:
+                            default = None
+                        else:
+                            default = annassign.children[3]
+                        param_names.append(DataclassParamName(
+                            parent_context=cls.parent_context,
+                            tree_name=name.tree_name,
+                            annotation_node=annassign.children[1],
+                            default_node=default,
+                        ))
+        return [DataclassSignature(cls, param_names)]
+
+
+class DataclassSignature(AbstractSignature):
+    def __init__(self, value, param_names):
+        super().__init__(value)
+        self._param_names = param_names
+
+    def get_param_names(self, resolve_stars=False):
+        return self._param_names
+
+
+class DataclassParamName(BaseTreeParamName):
+    def __init__(self, parent_context, tree_name, annotation_node, default_node):
+        super().__init__(parent_context, tree_name)
+        self.annotation_node = annotation_node
+        self.default_node = default_node
+
+    def get_kind(self):
+        return Parameter.POSITIONAL_OR_KEYWORD
+
+    def infer(self):
+        if self.annotation_node is None:
+            return NO_VALUES
+        else:
+            return self.parent_context.infer_node(self.annotation_node)
+
+
 class ItemGetterCallable(ValueWrapper):
     def __init__(self, instance, args_value_set):
         super().__init__(instance)
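The ``init`` flag this plugin threads through ``DataclassWrapper`` mirrors the runtime behaviour of ``dataclasses.dataclass``; a minimal demonstration:

from dataclasses import dataclass

@dataclass(init=True)
class WithInit:
    a: int

@dataclass(init=False)
class WithoutInit:
    a: int

print('__init__' in vars(WithInit))     # True: signature is WithInit(a: int)
print('__init__' in vars(WithoutInit))  # False: falls back to object.__init__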
@@ -840,12 +798,6 @@ _implemented = {
         # runtime_checkable doesn't really change anything and is just
         # adding logs for infering stuff, so we can safely ignore it.
         'runtime_checkable': lambda value, arguments, callback: NO_VALUES,
-        # Python 3.11+
-        'dataclass_transform': _dataclass_transform,
-    },
-    'typing_extensions': {
-        # Python <3.11
-        'dataclass_transform': _dataclass_transform,
     },
     'dataclasses': {
         # For now this works at least better than Jedi trying to understand it.
@@ -143,15 +143,6 @@ This improves autocompletion for libraries that use ``setattr`` or
 ``globals()`` modifications a lot.
 """

-allow_unsafe_interpreter_executions = True
-"""
-Controls whether descriptors are evaluated when using an Interpreter. This is
-something you might want to control when using Jedi from a Repl (e.g. IPython)
-
-Generally this setting allows Jedi to execute __getitem__ and descriptors like
-`property`.
-"""
-
 # ----------------
 # Caching Validity
 # ----------------
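The removed block documents ``allow_unsafe_interpreter_executions``. On the master side it is toggled like any other module-level setting (a sketch, assuming the master-branch jedi):

import jedi

# Keep REPL completions from executing arbitrary __getitem__/property code.
jedi.settings.allow_unsafe_interpreter_executions = False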
@@ -2,7 +2,7 @@
 Utilities for end-users.
 """

-import __main__
+import __main__  # type: ignore[import]
 from collections import namedtuple
 import logging
 import traceback
@@ -1,32 +0,0 @@
-[tool.mypy]
-# Exclude our copies of external stubs
-exclude = "^jedi/third_party"
-
-show_error_codes = true
-enable_error_code = "ignore-without-code"
-
-# Ensure generics are explicit about what they are (e.g: `List[str]` rather than
-# just `List`)
-disallow_any_generics = true
-
-disallow_subclassing_any = true
-
-# Avoid creating future gotchas emerging from bad typing
-warn_redundant_casts = true
-warn_unused_ignores = true
-warn_return_any = true
-warn_unused_configs = true
-
-warn_unreachable = true
-
-# Require values to be explicitly re-exported; this makes things easier for
-# Flake8 too and avoids accidentally importing thing from the "wrong" place
-# (which helps avoid circular imports)
-implicit_reexport = false
-
-strict_equality = true
-
-[[tool.mypy.overrides]]
-# Various __init__.py files which contain re-exports we want to implicitly make.
-module = ["jedi", "jedi.inference.compiled", "jedi.inference.value", "parso"]
-implicit_reexport = true

35 setup.cfg
@@ -21,13 +21,34 @@ per-file-ignores =
     jedi/__init__.py:F401
     jedi/inference/compiled/__init__.py:F401
    jedi/inference/value/__init__.py:F401
-exclude =
-    .tox/*
-    jedi/third_party/*
-    test/completion/*
-    test/examples/*
-    test/refactor/*
-    test/static_analysis/*
+exclude = jedi/third_party/* .tox/*

 [pycodestyle]
 max-line-length = 100
+
+
+[mypy]
+# Ensure generics are explicit about what they are (e.g: `List[str]` rather than
+# just `List`)
+disallow_any_generics = True
+
+disallow_subclassing_any = True
+
+# Avoid creating future gotchas emerging from bad typing
+warn_redundant_casts = True
+warn_unused_ignores = True
+warn_return_any = True
+warn_unused_configs = True
+
+warn_unreachable = True
+
+# Require values to be explicitly re-exported; this makes things easier for
+# Flake8 too and avoids accidentally importing thing from the "wrong" place
+# (which helps avoid circular imports)
+implicit_reexport = False
+
+strict_equality = True
+
+[mypy-jedi,jedi.inference.compiled,jedi.inference.value,parso]
+# Various __init__.py files which contain re-exports we want to implicitly make.
+implicit_reexport = True

59 setup.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-from typing import cast

 from setuptools import setup, find_packages
 from setuptools.depends import get_module_constant
@@ -10,7 +9,7 @@ __AUTHOR__ = 'David Halter'
 __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'

 # Get the version from within jedi. It's defined in exactly one place now.
-version = cast(str, get_module_constant("jedi", "__version__"))
+version = get_module_constant("jedi", "__version__")

 readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read()

@@ -28,62 +27,24 @@ setup(name='jedi',
       maintainer=__AUTHOR__,
       maintainer_email=__AUTHOR_EMAIL__,
       url='https://github.com/davidhalter/jedi',
-      project_urls={
-          "Documentation": 'https://jedi.readthedocs.io/en/latest/',
-      },
       license='MIT',
       keywords='python completion refactoring vim',
       long_description=readme,
       packages=find_packages(exclude=['test', 'test.*']),
-      python_requires='>=3.8',
-      # Python 3.13 grammars are added to parso in 0.8.4
-      install_requires=['parso>=0.8.5,<0.9.0'],
+      python_requires='>=3.6',
+      install_requires=['parso>=0.8.0,<0.9.0'],
       extras_require={
           'testing': [
-              'pytest<9.0.0',
+              'pytest<6.0.0',
               # docopt for sith doctests
               'docopt',
               # coloroma for colored debug output
               'colorama',
-              'Django',
-              'attrs',
-              'typing_extensions',
+              'Django<3.1',  # For now pin this.
           ],
           'qa': [
-              # latest version on 2025-06-16
-              'flake8==7.2.0',
-              # latest version supporting Python 3.6
-              'mypy==1.16',
-              # Arbitrary pins, latest at the time of pinning
-              'types-setuptools==80.9.0.20250529',
-          ],
-          'docs': [
-              # Just pin all of these.
-              'Jinja2==2.11.3',
-              'MarkupSafe==1.1.1',
-              'Pygments==2.8.1',
-              'alabaster==0.7.12',
-              'babel==2.9.1',
-              'chardet==4.0.0',
-              'commonmark==0.8.1',
-              'docutils==0.17.1',
-              'future==0.18.2',
-              'idna==2.10',
-              'imagesize==1.2.0',
-              'mock==1.0.1',
-              'packaging==20.9',
-              'pyparsing==2.4.7',
-              'pytz==2021.1',
-              'readthedocs-sphinx-ext==2.1.4',
-              'recommonmark==0.5.0',
-              'requests==2.25.1',
-              'six==1.15.0',
-              'snowballstemmer==2.1.0',
-              'sphinx==1.8.5',
-              'sphinx-rtd-theme==0.4.3',
-              'sphinxcontrib-serializinghtml==1.1.4',
-              'sphinxcontrib-websupport==1.2.4',
-              'urllib3==1.26.4',
+              'flake8==3.8.3',
+              'mypy==0.782',
           ],
       },
       package_data={'jedi': ['*.pyi', 'third_party/typeshed/LICENSE',
@@ -96,12 +57,10 @@ setup(name='jedi',
           'License :: OSI Approved :: MIT License',
           'Operating System :: OS Independent',
           'Programming Language :: Python :: 3',
+          'Programming Language :: Python :: 3.6',
+          'Programming Language :: Python :: 3.7',
           'Programming Language :: Python :: 3.8',
           'Programming Language :: Python :: 3.9',
-          'Programming Language :: Python :: 3.10',
-          'Programming Language :: Python :: 3.11',
-          'Programming Language :: Python :: 3.12',
-          'Programming Language :: Python :: 3.13',
           'Topic :: Software Development :: Libraries :: Python Modules',
           'Topic :: Text Editors :: Integrated Development Environments (IDE)',
           'Topic :: Utilities',
2 sith.py
@@ -44,7 +44,7 @@ Options:
   --pudb   Launch pudb when error is raised.
 """

-from docopt import docopt  # type: ignore[import, unused-ignore]
+from docopt import docopt  # type: ignore[import]

 import json
 import os
@@ -44,8 +44,6 @@ b[int():]

 #? list()
 b[:]
-#? int()
-b[:, :-1]

 #? 3
 b[:]
@@ -69,20 +67,6 @@ class _StrangeSlice():
 #? slice()
 _StrangeSlice()[1:2]

-for x in b[:]:
-    #? int()
-    x
-
-for x in b[:, :-1]:
-    #?
-    x
-
-class Foo:
-    def __getitem__(self, item):
-        return item
-
-#?
-Foo()[:, :-1][0]

 # -----------------
 # iterable multiplication
@@ -527,11 +511,3 @@ lc = [x for a, *x in [(1, '', 1.0)]]
 lc[0][0]
 #?
 lc[0][1]
-
-
-xy = (1,)
-x, y = *xy, None
-
-# whatever it is should not crash
-#?
-x
@@ -26,6 +26,11 @@ async def y():
     x().__await__().__next
     return 2

+async def x2():
+    async with open('asdf') as f:
+        #? ['readlines']
+        f.readlines
+
 class A():
     @staticmethod
     async def b(c=1, d=2):
@@ -47,6 +52,8 @@ async def awaitable_test():
     #? str()
     foo

+# python >= 3.6
+
 async def asgen():
     yield 1
     await asyncio.sleep(0)
@@ -98,22 +105,3 @@ async def f():
     f = await C().async_for_classmethod()
     #? C()
     f
-
-
-class AsyncCtxMgr:
-    def some_method():
-        pass
-
-    async def __aenter__(self):
-        return self
-
-    async def __aexit__(self, *args):
-        pass
-
-
-async def asyncctxmgr():
-    async with AsyncCtxMgr() as acm:
-        #? AsyncCtxMgr()
-        acm
-        #? ['some_method']
-        acm.som
@@ -413,10 +413,6 @@ with Foo() as f3:
 with Foo() as f3:
     f3
 
-with open("a"), open("b") as bfile:
-    #? ['flush']
-    bfile.flush
-
 # -----------------
 # Avoiding multiple definitions
 # -----------------
@@ -424,13 +420,3 @@ with open("a"), open("b") as bfile:
 some_array = ['', '']
 #! ['def upper']
 some_array[some_not_defined_index].upper
-
-# -----------------
-# operator
-# -----------------
-
-#? bool()
-res = 'f' in 'foo'; res
-
-#? bool()
-res = not {}; res

@@ -23,13 +23,7 @@ def inheritance_fixture():
 
 
 @pytest.fixture
-def capsysbinary(capsysbinary):
-    #? ['close']
-    capsysbinary.clos
-    return capsysbinary
-
-
-# used when fixtures are defined in multiple files
-pytest_plugins = [
-    "completion.fixture_module",
-]
+def testdir(testdir):
+    #? ['chdir']
+    testdir.chdir
+    return testdir

@@ -21,9 +21,11 @@ class Y(X):
     #? []
     def __doc__
 
-    #? []
-    def __class__
+    # This might or might not be what we wanted, currently properties are also
+    # used like this. IMO this is not wanted ~dave.
     #? ['__class__']
+    def __class__
+    #? []
     __class__
 
 

@@ -5,7 +5,6 @@ import uuid
 from django.db import models
 from django.contrib.auth.models import User
 from django.db.models.query_utils import DeferredAttribute
-from django.db.models.manager import BaseManager
 
 
 class TagManager(models.Manager):

@@ -284,13 +284,6 @@ def doctest_with_space():
     import_issu
     """
 
-def doctest_issue_github_1748():
-    """From GitHub #1748
-    #? 10 []
-    This. Al
-    """
-    pass
-
 
 def docstring_rst_identifiers():
     """

@@ -1,6 +0,0 @@
-# Exists only for completion/pytest.py
-import pytest
-
-@pytest.fixture
-def my_module_fixture():
-    return 1.0

@@ -1,3 +1,5 @@
+# python >= 3.6
+
 class Foo:
     bar = 1
 

|
|||||||
@@ -309,8 +309,3 @@ def annotation2() -> Iterator[float]:
|
|||||||
next(annotation1())
|
next(annotation1())
|
||||||
#? float()
|
#? float()
|
||||||
next(annotation2())
|
next(annotation2())
|
||||||
|
|
||||||
|
|
||||||
# annotations should override generator inference
|
|
||||||
#? float()
|
|
||||||
annotation1()
|
|
||||||
|
|||||||
@@ -110,4 +110,4 @@ class Test(object):
|
|||||||
# nocond lambdas make no sense at all.
|
# nocond lambdas make no sense at all.
|
||||||
|
|
||||||
#? int()
|
#? int()
|
||||||
[a for a in [1,2] if (lambda: 3)][0]
|
[a for a in [1,2] if lambda: 3][0]
|
||||||
|
|||||||
@@ -1 +0,0 @@
-mod1_name = 'mod1'

@@ -1 +0,0 @@
-mod2_name = 'mod2'

@@ -1,18 +0,0 @@
-import sys
-import os
-from os.path import dirname
-
-sys.path.insert(0, os.path.join(dirname(__file__), 'namespace2'))
-sys.path.insert(0, os.path.join(dirname(__file__), 'namespace1'))
-
-#? ['mod1']
-import pkg1.pkg2.mod1
-
-#? ['mod2']
-import pkg1.pkg2.mod2
-
-#? ['mod1_name']
-pkg1.pkg2.mod1.mod1_name
-
-#? ['mod2_name']
-pkg1.pkg2.mod2.mod2_name
@@ -23,9 +23,11 @@ def builtin_test():
     import sqlite3
 
 # classes is a local module that has an __init__.py and can therefore not be
-# found.
+# found. test can be found.
 #? []
 import classes
+#? ['test']
+import test
 
 #? ['timedelta']
 from datetime import timedel
@@ -76,7 +78,7 @@ from import_tree.pkg.mod1 import not_existant,
 #? 22 ['mod1', 'base']
 from import_tree.pkg. import mod1
 #? 17 ['mod1', 'mod2', 'random', 'pkg', 'references', 'rename1', 'rename2', 'classes', 'globals', 'recurse_class1', 'recurse_class2', 'invisible_pkg', 'flow_import']
-from import_tree. import new_pkg
+from import_tree. import pkg
 
 #? 18 ['pkg']
 from import_tree.p import pkg

@@ -180,11 +180,6 @@ def argskwargs(*args: int, **kwargs: float):
     #? float()
     kwargs['']
 
-class Test:
-    str: str = 'abc'
-
-#? ['upper']
-Test.str.upp
 
 class NotCalledClass:
     def __init__(self, x):

@@ -1,50 +0,0 @@
-""" Pep-0484 type hinted decorators """
-
-from typing import Callable
-
-
-def decorator(func):
-    def wrapper(*a, **k):
-        return str(func(*a, **k))
-    return wrapper
-
-
-def typed_decorator(func: Callable[..., int]) -> Callable[..., str]:
-    ...
-
-# Functions
-
-@decorator
-def plain_func() -> int:
-    return 4
-
-#? str()
-plain_func()
-
-
-@typed_decorator
-def typed_func() -> int:
-    return 4
-
-#? str()
-typed_func()
-
-
-# Methods
-
-class X:
-    @decorator
-    def plain_method(self) -> int:
-        return 4
-
-    @typed_decorator
-    def typed_method(self) -> int:
-        return 4
-
-inst = X()
-
-#? str()
-inst.plain_method()
-
-#? str()
-inst.typed_method()
@@ -27,13 +27,13 @@ class PlainClass(object):
 
 
 tpl = ("1", 2)
-tpl_typed: Tuple[str, int] = ("2", 3)
+tpl_typed = ("2", 3)  # type: Tuple[str, int]
 
 collection = {"a": 1}
-collection_typed: Dict[str, int] = {"a": 1}
+collection_typed = {"a": 1}  # type: Dict[str, int]
 
-list_of_ints: List[int] = [42]
-list_of_funcs: List[Callable[[T], T]] = [foo]
+list_of_ints = [42]  # type: List[int]
+list_of_funcs = [foo]  # type: List[Callable[[T], T]]
 
 custom_generic = CustomGeneric(123.45)
 

@@ -19,12 +19,12 @@ T_co = TypeVar('T_co', covariant=True)
 V = TypeVar('V')
 
 
-just_float: float = 42.
-optional_float: Optional[float] = 42.
-list_of_ints: List[int] = [42]
-list_of_floats: List[float] = [42.]
-list_of_optional_floats: List[Optional[float]] = [x or None for x in list_of_floats]
-list_of_ints_and_strs: List[Union[int, str]] = [42, 'abc']
+just_float = 42.  # type: float
+optional_float = 42.  # type: Optional[float]
+list_of_ints = [42]  # type: List[int]
+list_of_floats = [42.]  # type: List[float]
+list_of_optional_floats = [x or None for x in list_of_floats]  # type: List[Optional[float]]
+list_of_ints_and_strs = [42, 'abc']  # type: List[Union[int, str]]
 
 # Test that simple parameters are handled
 def list_t_to_list_t(the_list: List[T]) -> List[T]:
@@ -48,7 +48,7 @@ for z in list_t_to_list_t(list_of_ints_and_strs):
     z
 
 
-list_of_int_type: List[Type[int]] = [int]
+list_of_int_type = [int]  # type: List[Type[int]]
 
 # Test that nested parameters are handled
 def list_optional_t_to_list_t(the_list: List[Optional[T]]) -> List[T]:
@@ -85,7 +85,7 @@ def optional_list_t_to_list_t(x: Optional[List[T]]) -> List[T]:
     return x if x is not None else []
 
 
-optional_list_float: Optional[List[float]] = None
+optional_list_float = None  # type: Optional[List[float]]
 for xc in optional_list_t_to_list_t(optional_list_float):
     #? float()
     xc
@@ -134,7 +134,7 @@ def list_tuple_t_to_tuple_list_t(the_list: List[Tuple[T]]) -> Tuple[List[T], ...
     return tuple(list(x) for x in the_list)
 
 
-list_of_int_tuples: List[Tuple[int]] = [(x,) for x in list_of_ints]
+list_of_int_tuples = [(x,) for x in list_of_ints]  # type: List[Tuple[int]]
 
 for b in list_tuple_t_to_tuple_list_t(list_of_int_tuples):
     #? int()
@@ -145,7 +145,7 @@ def list_tuple_t_elipsis_to_tuple_list_t(the_list: List[Tuple[T, ...]]) -> Tuple
     return tuple(list(x) for x in the_list)
 
 
-list_of_int_tuple_elipsis: List[Tuple[int, ...]] = [tuple(list_of_ints)]
+list_of_int_tuple_elipsis = [tuple(list_of_ints)]  # type: List[Tuple[int, ...]]
 
 for b in list_tuple_t_elipsis_to_tuple_list_t(list_of_int_tuple_elipsis):
     #? int()
@@ -157,7 +157,7 @@ def foo(x: int) -> int:
     return x
 
 
-list_of_funcs: List[Callable[[int], int]] = [foo]
+list_of_funcs = [foo]  # type: List[Callable[[int], int]]
 
 def list_func_t_to_list_func_type_t(the_list: List[Callable[[T], T]]) -> List[Callable[[Type[T]], T]]:
     def adapt(func: Callable[[T], T]) -> Callable[[Type[T]], T]:
@@ -176,7 +176,7 @@ def bar(*a, **k) -> int:
     return len(a) + len(k)
 
 
-list_of_funcs_2: List[Callable[..., int]] = [bar]
+list_of_funcs_2 = [bar]  # type: List[Callable[..., int]]
 
 def list_func_t_passthrough(the_list: List[Callable[..., T]]) -> List[Callable[..., T]]:
     return the_list
@@ -187,7 +187,7 @@ for b in list_func_t_passthrough(list_of_funcs_2):
     b(None, x="x")
 
 
-mapping_int_str: Dict[int, str] = {42: 'a'}
+mapping_int_str = {42: 'a'}  # type: Dict[int, str]
 
 # Test that mappings (that have more than one parameter) are handled
 def invert_mapping(mapping: Mapping[K, V]) -> Mapping[V, K]:
@@ -210,11 +210,11 @@ first(mapping_int_str)
 #? str()
 first("abc")
 
-some_str: str = NotImplemented
+some_str = NotImplemented  # type: str
 #? str()
 first(some_str)
 
-annotated: List[ Callable[[Sequence[float]], int] ] = [len]
+annotated = [len]  # type: List[ Callable[[Sequence[float]], int] ]
 #? int()
 first(annotated)()
 
@@ -237,7 +237,7 @@ for b in values(mapping_int_str):
 #
 # Tests that user-defined generic types are handled
 #
-list_ints: List[int] = [42]
+list_ints = [42]  # type: List[int]
 
 class CustomGeneric(Generic[T_co]):
     def __init__(self, val: T_co) -> None:
@@ -248,7 +248,7 @@ class CustomGeneric(Generic[T_co]):
 def custom(x: CustomGeneric[T]) -> T:
     return x.val
 
-custom_instance: CustomGeneric[int] = CustomGeneric(42)
+custom_instance = CustomGeneric(42)  # type: CustomGeneric[int]
 
 #? int()
 custom(custom_instance)
@@ -275,7 +275,7 @@ for x5 in wrap_custom(list_ints):
 
 
 # Test extraction of type from a nested custom generic type
-list_custom_instances: List[CustomGeneric[int]] = [CustomGeneric(42)]
+list_custom_instances = [CustomGeneric(42)]  # type: List[CustomGeneric[int]]
 
 def unwrap_custom(iterable: Iterable[CustomGeneric[T]]) -> List[T]:
     return [x.val for x in iterable]
@@ -303,7 +303,7 @@ for xg in unwrap_custom(CustomGeneric(s) for s in 'abc'):
 
 
 # Test extraction of type from type parameer nested within a custom generic type
-custom_instance_list_int: CustomGeneric[List[int]] = CustomGeneric([42])
+custom_instance_list_int = CustomGeneric([42])  # type: CustomGeneric[List[int]]
 
 def unwrap_custom2(instance: CustomGeneric[Iterable[T]]) -> List[T]:
     return list(instance.val)
@@ -326,7 +326,7 @@ class Specialised(Mapping[int, str]):
     pass
 
 
-specialised_instance: Specialised = NotImplemented
+specialised_instance = NotImplemented  # type: Specialised
 
 #? int()
 first(specialised_instance)
@@ -341,7 +341,7 @@ class ChildOfSpecialised(Specialised):
     pass
 
 
-child_of_specialised_instance: ChildOfSpecialised = NotImplemented
+child_of_specialised_instance = NotImplemented  # type: ChildOfSpecialised
 
 #? int()
 first(child_of_specialised_instance)
@@ -355,13 +355,13 @@ class CustomPartialGeneric1(Mapping[str, T]):
     pass
 
 
-custom_partial1_instance: CustomPartialGeneric1[int] = NotImplemented
+custom_partial1_instance = NotImplemented  # type: CustomPartialGeneric1[int]
 
 #? str()
 first(custom_partial1_instance)
 
 
-custom_partial1_unbound_instance: CustomPartialGeneric1 = NotImplemented
+custom_partial1_unbound_instance = NotImplemented  # type: CustomPartialGeneric1
 
 #? str()
 first(custom_partial1_unbound_instance)
@@ -371,7 +371,7 @@ class CustomPartialGeneric2(Mapping[T, str]):
     pass
 
 
-custom_partial2_instance: CustomPartialGeneric2[int] = NotImplemented
+custom_partial2_instance = NotImplemented  # type: CustomPartialGeneric2[int]
 
 #? int()
 first(custom_partial2_instance)
@@ -380,7 +380,7 @@ first(custom_partial2_instance)
 values(custom_partial2_instance)[0]
 
 
-custom_partial2_unbound_instance: CustomPartialGeneric2 = NotImplemented
+custom_partial2_unbound_instance = NotImplemented  # type: CustomPartialGeneric2
 
 #? []
 first(custom_partial2_unbound_instance)

@@ -19,16 +19,16 @@ TTypeAny = TypeVar('TTypeAny', bound=Type[Any])
 TCallable = TypeVar('TCallable', bound=Callable[..., Any])
 
 untyped_list_str = ['abc', 'def']
-typed_list_str: List[str] = ['abc', 'def']
+typed_list_str = ['abc', 'def']  # type: List[str]
 
 untyped_tuple_str = ('abc',)
-typed_tuple_str: Tuple[str] = ('abc',)
+typed_tuple_str = ('abc',)  # type: Tuple[str]
 
 untyped_tuple_str_int = ('abc', 4)
-typed_tuple_str_int: Tuple[str, int] = ('abc', 4)
+typed_tuple_str_int = ('abc', 4)  # type: Tuple[str, int]
 
-variadic_tuple_str: Tuple[str, ...] = ('abc',)
-variadic_tuple_str_int: Tuple[Union[str, int], ...] = ('abc', 4)
+variadic_tuple_str = ('abc',)  # type: Tuple[str, ...]
+variadic_tuple_str_int = ('abc', 4)  # type: Tuple[Union[str, int], ...]
 
 
 def untyped_passthrough(x):
@@ -58,16 +58,6 @@ def typed_bound_generic_passthrough(x: TList) -> TList:
 
     return x
 
-# Forward references are more likely with custom types, however this aims to
-# test just the handling of the quoted type rather than any other part of the
-# machinery.
-def typed_quoted_return_generic_passthrough(x: T) -> 'List[T]':
-    return [x]
-
-def typed_quoted_input_generic_passthrough(x: 'Tuple[T]') -> T:
-    x
-    return x[0]
-
 
 for a in untyped_passthrough(untyped_list_str):
     #? str()
@@ -156,23 +146,6 @@ for q in typed_bound_generic_passthrough(typed_list_str):
     q
 
 
-for r in typed_quoted_return_generic_passthrough("something"):
-    #? str()
-    r
-
-for s in typed_quoted_return_generic_passthrough(42):
-    #? int()
-    s
-
-
-#? str()
-typed_quoted_input_generic_passthrough(("something",))
-
-#? int()
-typed_quoted_input_generic_passthrough((42,))
-
-
-
 class CustomList(List):
     def get_first(self):
         return self[0]

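The hunks above mechanically rewrite PEP 526 variable annotations into the older PEP 484 type-comment spelling. Both forms carry the same information for a type-aware tool such as jedi or mypy; the comment form simply predates Python 3.6. A small sketch of the equivalence (plain Python, not part of the diff; the names are illustrative):

from typing import List, Optional

# PEP 526 annotation (Python 3.6+): visible at runtime via __annotations__.
ports: List[int] = [80, 443]

# Equivalent PEP 484 type comment: parses on older Pythons, but is only
# visible to tools that read comments, never at runtime.
timeout = None  # type: Optional[float]

print(__annotations__)  # {'ports': typing.List[int]} -- no entry for timeout
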
@@ -1,9 +1,10 @@
+# python >= 3.6
 from typing import List, Dict, overload, Tuple, TypeVar
 
 lst: list
 list_alias: List
 list_str: List[str]
-list_int: List[int]
+list_str: List[int]
 
 # -------------------------
 # With base classes

@@ -2,14 +2,18 @@
 Test the typing library, with docstrings and annotations
 """
 import typing
-from typing import Sequence, MutableSequence, List, Iterable, Iterator, \
-    AbstractSet, Tuple, Mapping, Dict, Union, Optional
 
 class B:
     pass
 
-def we_can_has_sequence(p: Sequence[int], q: Sequence[B], r: Sequence[int],
-                        s: Sequence["int"], t: MutableSequence[dict], u: List[float]):
+def we_can_has_sequence(p, q, r, s, t, u):
+    """
+    :type p: typing.Sequence[int]
+    :type q: typing.Sequence[B]
+    :type r: typing.Sequence[int]
+    :type s: typing.Sequence["int"]
+    :type t: typing.MutableSequence[dict]
+    :type u: typing.List[float]
+    """
     #? ["count"]
     p.c
     #? int()
@@ -39,8 +43,13 @@ def we_can_has_sequence(p: Sequence[int], q: Sequence[B], r: Sequence[int],
     #? float()
     u[1]
 
-def iterators(ps: Iterable[int], qs: Iterator[str], rs:
-              Sequence["ForwardReference"], ts: AbstractSet["float"]):
+def iterators(ps, qs, rs, ts):
+    """
+    :type ps: typing.Iterable[int]
+    :type qs: typing.Iterator[str]
+    :type rs: typing.Sequence["ForwardReference"]
+    :type ts: typing.AbstractSet["float"]
+    """
     for p in ps:
         #? int()
         p
@@ -70,13 +79,22 @@ def iterators(ps: Iterable[int], qs: Iterator[str], rs:
     #? float()
     t
 
-def sets(p: AbstractSet[int], q: typing.MutableSet[float]):
+def sets(p, q):
+    """
+    :type p: typing.AbstractSet[int]
+    :type q: typing.MutableSet[float]
+    """
     #? []
     p.a
     #? ["add"]
     q.a
 
-def tuple(p: Tuple[int], q: Tuple[int, str, float], r: Tuple[B, ...]):
+def tuple(p, q, r):
+    """
+    :type p: typing.Tuple[int]
+    :type q: typing.Tuple[int, str, float]
+    :type r: typing.Tuple[B, ...]
+    """
     #? int()
     p[0]
     #? ['index']
@@ -109,14 +127,16 @@ class Key:
 class Value:
     pass
 
-def mapping(
-        p: Mapping[Key, Value],
-        q: typing.MutableMapping[Key, Value],
-        d: Dict[Key, Value],
-        dd: typing.DefaultDict[Key, Value],
-        r: typing.KeysView[Key],
-        s: typing.ValuesView[Value],
-        t: typing.ItemsView[Key, Value]):
+def mapping(p, q, d, dd, r, s, t):
+    """
+    :type p: typing.Mapping[Key, Value]
+    :type q: typing.MutableMapping[Key, Value]
+    :type d: typing.Dict[Key, Value]
+    :type dd: typing.DefaultDict[Key, Value]
+    :type r: typing.KeysView[Key]
+    :type s: typing.ValuesView[Value]
+    :type t: typing.ItemsView[Key, Value]
+    """
     #? []
     p.setd
     #? ["setdefault"]
@@ -178,12 +198,14 @@ def mapping(
     #? Value()
     value
 
-def union(
-        p: Union[int],
-        q: Union[int, int],
-        r: Union[int, str, "int"],
-        s: Union[int, typing.Union[str, "typing.Union['float', 'dict']"]],
-        t: Union[int, None]):
+def union(p, q, r, s, t):
+    """
+    :type p: typing.Union[int]
+    :type q: typing.Union[int, int]
+    :type r: typing.Union[int, str, "int"]
+    :type s: typing.Union[int, typing.Union[str, "typing.Union['float', 'dict']"]]
+    :type t: typing.Union[int, None]
+    """
     #? int()
     p
     #? int()
@@ -195,8 +217,9 @@ def union(
     #? int() None
     t
 
-def optional(p: Optional[int]):
+def optional(p):
     """
+    :type p: typing.Optional[int]
     Optional does not do anything special. However it should be recognised
     as being of that type. Jedi doesn't do anything with the extra into that
     it can be None as well
@@ -211,7 +234,10 @@ class TestDict(typing.Dict[str, int]):
     def setdud(self):
         pass
 
-def testdict(x: TestDict):
+def testdict(x):
+    """
+    :type x: TestDict
+    """
     #? ["setdud", "setdefault"]
     x.setd
     for key in x.keys():
@@ -236,7 +262,10 @@ y = WrappingType(0) # Per https://github.com/davidhalter/jedi/issues/1015#issuec
 #? str()
 y
 
-def testnewtype(y: WrappingType):
+def testnewtype(y):
+    """
+    :type y: WrappingType
+    """
     #? str()
     y
     #? ["upper"]
@@ -244,7 +273,10 @@ def testnewtype(y: WrappingType):
 
 WrappingType2 = typing.NewType()
 
-def testnewtype2(y: WrappingType2):
+def testnewtype2(y):
+    """
+    :type y: WrappingType2
+    """
     #?
     y
     #? []
@@ -265,7 +297,10 @@ class TestDefaultDict(typing.DefaultDict[str, int]):
     def setdud(self):
         pass
 
-def testdict(x: TestDefaultDict):
+def testdict(x):
+    """
+    :type x: TestDefaultDict
+    """
     #? ["setdud", "setdefault"]
     x.setd
     for key in x.keys():

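The hunks above convert real parameter annotations back into Sphinx-style `:type ...:` docstring fields, which jedi also accepts as a type source. A minimal sketch of that docstring convention (illustrative, not taken from the diff):

def scale(values, factor):
    """
    :type values: typing.List[float]
    :type factor: float
    """
    # jedi reads the :type: fields above and can then offer float-method
    # completions on the elements of `values` inside this function.
    return [v * factor for v in values]
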
@@ -1,6 +1,9 @@
 """
-PEP 526 introduced a way of using type annotations on variables.
+PEP 526 introduced a new way of using type annotations on variables. It was
+introduced in Python 3.6.
 """
+# python >= 3.6
+
 import typing
 
 asdf = ''
@@ -44,7 +47,7 @@ class Foo():
     baz: typing.ClassVar[str]
 
 
-#? int()
+#?
 Foo.bar
 #? int()
 Foo().bar
@@ -58,7 +61,6 @@ class VarClass:
     var_instance2: float
     var_class1: typing.ClassVar[str] = 1
     var_class2: typing.ClassVar[bytes]
-    var_class3 = None
 
     def __init__(self):
         #? int()
@@ -71,21 +73,15 @@ class VarClass:
         d.var_class2
         #? []
         d.int
-        #? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2', 'var_class3']
+        #? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2']
         self.var_
 
-class VarClass2(VarClass):
-    var_class3: typing.ClassVar[int]
-
-    def __init__(self):
-        #? int()
-        self.var_class3
 
-#? ['var_class1', 'var_class2', 'var_instance1', 'var_class3', 'var_instance2']
+#? ['var_class1', 'var_class2', 'var_instance1']
 VarClass.var_
 #? int()
 VarClass.var_instance1
-#? float()
+#?
 VarClass.var_instance2
 #? str()
 VarClass.var_class1
@@ -95,7 +91,7 @@ VarClass.var_class2
 VarClass.int
 
 d = VarClass()
-#? ['var_class1', 'var_class2', 'var_class3', 'var_instance1', 'var_instance2']
+#? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2']
 d.var_
 #? int()
 d.var_instance1

@@ -1,26 +0,0 @@
-# python >= 3.9
-
-from typing import Annotated
-
-# This is just a dummy and very meaningless thing to use with to the Annotated
-# type hint
-class Foo:
-    pass
-
-class A:
-    pass
-
-
-def annotated_function_params(
-        basic: Annotated[str, Foo()],
-        obj: A,
-        annotated_obj: Annotated[A, Foo()],
-):
-    #? str()
-    basic
-
-    #? A()
-    obj
-
-    #? A()
-    annotated_obj
@@ -1,46 +0,0 @@
-from pep0484_generic_parameters import list_t_to_list_t
-
-list_of_ints_and_strs: list[int | str]
-
-# Test that unions are handled
-x2 = list_t_to_list_t(list_of_ints_and_strs)[0]
-#? int() str()
-x2
-
-for z in list_t_to_list_t(list_of_ints_and_strs):
-    #? int() str()
-    z
-
-
-from pep0484_generic_passthroughs import (
-    typed_variadic_tuple_generic_passthrough,
-)
-
-variadic_tuple_str_int: tuple[int | str, ...]
-
-for m in typed_variadic_tuple_generic_passthrough(variadic_tuple_str_int):
-    #? str() int()
-    m
-
-
-def func_returns_byteslike() -> bytes | bytearray:
-    pass
-
-#? bytes() bytearray()
-func_returns_byteslike()
-
-
-pep604_optional_1: int | str | None
-pep604_optional_2: None | bytes
-
-#? int() str() None
-pep604_optional_1
-
-#? None bytes()
-pep604_optional_2
-
-
-pep604_in_str: "int | bytes"
-
-#? int() bytes()
-pep604_in_str
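The deleted file above exercised PEP 604 union syntax (`int | str`). As a rough rule, the bare `X | Y` spelling needs Python 3.10 wherever the annotation is actually evaluated, while the quoted form works on earlier versions because it is stored as a string; a sketch (illustrative, not from the diff):

def to_bytes(data: "bytes | bytearray") -> bytes:
    # The quoted PEP 604 union is never evaluated at runtime, so this
    # function also runs on Pythons without the | operator on types.
    return bytes(data)

print(to_bytes(bytearray(b"ok")))  # b'ok'
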
@@ -1,5 +1,3 @@
-from typing import Generator
-
 import pytest
 from pytest import fixture
 
@@ -66,11 +64,6 @@ def lala(my_fixture):
 def lala(my_fixture):
     pass
 
-# overriding types of a fixture should be possible
-def test_x(my_yield_fixture: str):
-    #? str()
-    my_yield_fixture
-
 # -----------------
 # completion
 # -----------------
@@ -96,9 +89,6 @@ def test_x(my_con
 #? 18 ['my_conftest_fixture']
 def test_x(my_conftest_fixture):
     return
-#? ['my_module_fixture']
-def test_x(my_modu
-    return
 
 #? []
 def lala(my_con
@@ -142,6 +132,9 @@ def test_p(monkeypatch):
 #? ['capsysbinary']
 def test_p(capsysbin
 
+#? ['tmpdir', 'tmpdir_factory']
+def test_p(tmpdi
+
 
 def close_parens():
     pass
@@ -171,40 +164,3 @@ def test_inheritance_fixture(inheritance_fixture, caplog):
 @pytest.fixture
 def caplog(caplog):
     yield caplog
-
-# -----------------
-# Generator with annotation
-# -----------------
-
-@pytest.fixture
-def with_annot() -> Generator[float, None, None]:
-    pass
-
-def test_with_annot(inheritance_fixture, with_annot):
-    #? float()
-    with_annot
-
-# -----------------
-# pytest external plugins
-# -----------------
-
-#? ['admin_user', 'admin_client']
-def test_z(admin
-
-#! 15 ['def admin_client']
-def test_p(admin_client):
-    #? ['login', 'logout']
-    admin_client.log
-
-@pytest.fixture
-@some_decorator
-#? ['admin_user']
-def bla(admin_u
-    return
-
-@pytest.fixture
-@some_decorator
-#! 12 ['def admin_user']
-def bla(admin_user):
-    pass

@@ -1,6 +1,7 @@
 import os
 import sys
 import subprocess
+from itertools import count
 
 import pytest
 
@@ -9,6 +10,9 @@ from . import run
 from . import refactor
 from jedi import InterpreterEnvironment, get_system_environment
 from jedi.inference.compiled.value import create_from_access_path
+from jedi.inference.imports import _load_python_module
+from jedi.file_io import KnownContentFileIO
+from jedi.inference.base_value import ValueSet
 from jedi.api.interpreter import MixedModuleContext
 
 # For interpreter tests sometimes the path of this directory is in the sys
@@ -159,6 +163,19 @@ def create_compiled_object(inference_state):
     )
 
 
+@pytest.fixture
+def module_injector():
+    counter = count()
+
+    def module_injector(inference_state, names, code):
+        assert isinstance(names, tuple)
+        file_io = KnownContentFileIO('/foo/bar/module-injector-%s.py' % next(counter), code)
+        v = _load_python_module(inference_state, file_io, names)
+        inference_state.module_cache.add(names, ValueSet([v]))
+
+    return module_injector
+
+
 @pytest.fixture(params=[False, True])
 def class_findable(monkeypatch, request):
     if not request.param:
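The new `module_injector` fixture above returns a callable that loads a synthetic module from an in-memory `KnownContentFileIO` and registers it in the inference state's module cache. A hypothetical test using it might look like the following sketch; the module name, source, and assertion are illustrative only, not taken from this diff:

def test_completes_injected_module(Script, module_injector):
    # Build a script first so there is an inference state to inject into.
    script = Script('import injected_mod; injected_mod.')
    module_injector(script._inference_state, ('injected_mod',), 'value = 42')
    # After injection, completion should see the fake module's names.
    assert 'value' in [c.name for c in script.complete()]
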
@@ -1,6 +0,0 @@
-from pytest import fixture
-
-
-@fixture()
-def admin_user():
-    pass

@@ -1,16 +0,0 @@
-import pytest
-
-from .fixtures import admin_user  # noqa
-
-
-@pytest.fixture()
-def admin_client():
-    return Client()
-
-
-class Client:
-    def login(self, **credentials):
-        ...
-
-    def logout(self):
-        ...

test/run.py (25 changed lines)
@@ -104,14 +104,10 @@ import os
 import re
 import sys
 import operator
-if sys.version_info < (3, 8):
-    literal_eval = eval
-else:
-    from ast import literal_eval
+from ast import literal_eval
 from io import StringIO
 from functools import reduce
 from unittest.mock import ANY
-from pathlib import Path
 
 import parso
 from _pytest.outcomes import Skipped
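The hunk above replaces a guarded import, which aliased `literal_eval` to plain `eval` on Python < 3.8, with an unconditional `from ast import literal_eval`. The two names are not interchangeable in general, since `literal_eval` accepts only literal syntax; a quick illustration:

from ast import literal_eval

print(literal_eval("[1, 2, 'three']"))  # fine: a plain literal
try:
    literal_eval("__import__('os')")    # rejected: a call is not a literal
except ValueError as exc:
    print('rejected:', exc)
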
@@ -126,7 +122,6 @@ from jedi.api.environment import get_default_environment, get_system_environment
 from jedi.inference.gradual.conversion import convert_values
 from jedi.inference.analysis import Warning
 
-test_dir = Path(__file__).absolute().parent
 
 TEST_COMPLETIONS = 0
 TEST_INFERENCE = 1
@@ -134,7 +129,7 @@ TEST_GOTO = 2
 TEST_REFERENCES = 3
 
 
-grammar313 = parso.load_grammar(version='3.13')
+grammar36 = parso.load_grammar(version='3.6')
 
 
 class BaseTestCase(object):
@@ -178,7 +173,6 @@ class IntegrationTestCase(BaseTestCase):
         self.start = start
         self.line = line
         self.path = path
-        self._project = jedi.Project(test_dir)
 
     @property
     def module_name(self):
@@ -194,12 +188,7 @@ class IntegrationTestCase(BaseTestCase):
             self.line_nr_test, self.line.rstrip())
 
     def script(self, environment):
-        return jedi.Script(
-            self.source,
-            path=self.path,
-            environment=environment,
-            project=self._project
-        )
+        return jedi.Script(self.source, path=self.path, environment=environment)
 
     def run(self, compare_cb, environment=None):
         testers = {
@@ -209,7 +198,7 @@ class IntegrationTestCase(BaseTestCase):
             TEST_REFERENCES: self.run_get_references,
         }
         if (self.path.endswith('pytest.py') or self.path.endswith('conftest.py')) \
-                and os.path.realpath(environment.executable) != os.path.realpath(sys.executable):
+                and environment.executable != os.path.realpath(sys.executable):
             # It's not guarantueed that pytest is installed in test
             # environments, if we're not running in the same environment that
            # we're already in, so just skip that case.
@@ -238,7 +227,7 @@ class IntegrationTestCase(BaseTestCase):
         should_be = set()
         for match in re.finditer('(?:[^ ]+)', correct):
             string = match.group(0)
-            parser = grammar313.parse(string, start_symbol='eval_input', error_recovery=False)
+            parser = grammar36.parse(string, start_symbol='eval_input', error_recovery=False)
             parser_utils.move(parser.get_root_node(), self.line_nr)
             node = parser.get_root_node()
             module_context = script._get_module_context()
@@ -274,7 +263,7 @@ class IntegrationTestCase(BaseTestCase):
         self.correct = self.correct.strip()
         compare = sorted(
             (('stub:' if r.is_stub() else '')
-             + re.sub(r'^completion\.', '', r.module_name),
+             + re.sub(r'^test\.completion\.', '', r.module_name),
              r.line,
              r.column)
             for r in result
@@ -504,7 +493,7 @@ if __name__ == '__main__':
     if arguments['--env']:
         environment = get_system_environment(arguments['--env'])
     else:
-        # Will be 3.13.
+        # Will be 3.6.
         environment = get_default_environment()
 
     import traceback

@@ -321,19 +321,10 @@ def test_docstrings_for_completions(Script):
         assert isinstance(c.docstring(), str)
 
 
-def test_completions_order_most_resemblance_on_top(Script):
-    """Test that the completion which resembles the in-typing the most will come first."""
-    code = "from pathlib import Path\npath = Path('hello.txt')\n\npat"
-    script = Script(code)
-    # User is typing "pat" and "path" is closer to it than "Path".
-    assert ['path', 'Path'] == [comp.name for comp in script.complete()]
-
-
 def test_fuzzy_completion(Script):
     script = Script('string = "hello"\nstring.upper')
-    # 'isupper' is included because it is fuzzily matched.
-    assert ['upper',
-            'isupper'] == [comp.name for comp in script.complete(fuzzy=True)]
+    assert ['isupper',
+            'upper'] == [comp.name for comp in script.complete(fuzzy=True)]
 
 
 def test_math_fuzzy_completion(Script, environment):

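`complete(fuzzy=True)` in the test above is public jedi API: with fuzzy matching enabled, a candidate only has to contain the typed characters as a subsequence, which is why `isupper` matches the typed `upper` at all. A standalone check, assuming a current jedi install:

import jedi

script = jedi.Script('string = "hello"\nstring.upper')
# Both names contain "upper" as a subsequence; the exact ordering is what
# the assertion in the test above pins down.
print([c.name for c in script.complete(fuzzy=True)])
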
@@ -650,7 +650,6 @@ def test_cursor_after_signature(Script, column):
         ('abs(chr ( \nclass y: pass', 1, 8, 'abs', 0),
         ('abs(chr ( \nclass y: pass', 1, 9, 'abs', 0),
         ('abs(chr ( \nclass y: pass', 1, 10, 'chr', 0),
-        ('abs(foo.bar=3)', 1, 13, 'abs', 0),
     ]
 )
 def test_base_signatures(Script, code, line, column, name, index):

@@ -188,7 +188,10 @@ def test_functions_should_have_params(Script):
         assert c.get_signatures()
 
 
-def test_hashlib_params(Script):
+def test_hashlib_params(Script, environment):
+    if environment.version_info < (3,):
+        pytest.skip()
+
     script = Script('from hashlib import sha256')
     c, = script.complete()
     sig, = c.get_signatures()
@@ -348,8 +351,8 @@ def test_parent_on_comprehension(Script):
 
 def test_type(Script):
     for c in Script('a = [str()]; a[0].').complete():
-        if c.name == '__class__':
-            assert c.type == 'property'
+        if c.name == '__class__' and False:  # TODO fix.
+            assert c.type == 'class'
         else:
             assert c.type in ('function', 'statement')
 

Some files were not shown because too many files have changed in this diff.