forked from VimPlug/jedi
Compare commits
147 Commits
| SHA1 |
|---|
| 0bf8a69024 |
| 9bb8f335c9 |
| 8d313e014f |
| a79d386eba |
| 48b137a7f5 |
| b4a4dacebd |
| efd8861d62 |
| 2f86f549f5 |
| cc0c4cc308 |
| 9617d4527d |
| 86ae11eb43 |
| 078595f8d7 |
| 76417cc3c1 |
| 70800a6dc2 |
| 4711b85b50 |
| 368bf7e58a |
| 3bdb941daa |
| bd1010bbd2 |
| 23b3327b1d |
| 075577d50c |
| 96b57f46cb |
| c24eb4bd67 |
| 862f611829 |
| f9cbc65f2d |
| e1f9624bd4 |
| 6a2a2a9fa1 |
| 4545d91929 |
| a2b984ce24 |
| 6bc79b4933 |
| b9127147e4 |
| afb2755c27 |
| 56bd795100 |
| cdb760487b |
| fc9a55b042 |
| 1cf5b194ca |
| 84b07a8809 |
| 6c555e62aa |
| d6306a06a4 |
| 8fc2add242 |
| 4a593f9693 |
| 38a22a4ae8 |
| 10ecb77673 |
| 357c86ad9c |
| 8cae517821 |
| 0101fdd9da |
| e17d7f5d42 |
| 7d16a35693 |
| 1456a156a6 |
| 3d55b2d826 |
| 1547177128 |
| bd43608f98 |
| 72f2a9e4a5 |
| b91203820c |
| 71572e63cd |
| 7c9f24a18e |
| 9ca7b30e38 |
| fd8f254ce1 |
| 1c76359291 |
| ccb460b433 |
| 30d14ea016 |
| bbb1502e06 |
| f34a9281b9 |
| 95a1a69771 |
| 1a4be5c91c |
| 40d3abe2b2 |
| f25310e0b9 |
| e576457a43 |
| a1314ac3c1 |
| 481e6bcff0 |
| 9ff5050d01 |
| 9a4a96b453 |
| e7a019e628 |
| 4b276bae87 |
| ad5170a37a |
| d292333dab |
| a408fb3211 |
| 3cabc4b969 |
| fb360506fb |
| fe1799d125 |
| 733919e34c |
| 10b61c41f4 |
| 08b0b668a6 |
| 72a8ceed76 |
| 1e796fc08d |
| 2fc91ceb64 |
| f6bc166ea7 |
| 08fa7941ce |
| 3a62d54403 |
| 748946349f |
| 71cea7200b |
| 87d7c59c6e |
| f3c1f4c548 |
| d06e55aab5 |
| cef769ecd8 |
| aa4dcc1631 |
| a59e5a016f |
| 37a40d53a8 |
| d8c0d8e5d2 |
| 508ed7e5b8 |
| a12d62e9c9 |
| 2500112f6c |
| 6cdc1bcd8a |
| 80831d79c2 |
| d857668292 |
| f4aad8bbfe |
| 5b7984c4d4 |
| 2b1cbe4d42 |
| 8ffdf6746f |
| a79a1fbef5 |
| 58141f1e1e |
| e0e2be3027 |
| 1e7662c3e1 |
| 68974aee58 |
| c208d37ac4 |
| 38474061cf |
| 95f835a014 |
| 282c6a2ba1 |
| ea71dedaa1 |
| 106b11f1af |
| f9e90e863b |
| 197aa22f29 |
| e96ebbe88f |
| 55941e506b |
| ff4a77391a |
| 70c2fce9c2 |
| 5dab97a303 |
| e2cd228aad |
| c1014e00ca |
| 62a3f99594 |
| 6ebe3f87a3 |
| 50812b5836 |
| d10eff5625 |
| 6748faa071 |
| fc14aad8f2 |
| 3c909a9849 |
| b94b45cfa1 |
| a95274d66f |
| 8d48e7453a |
| 91499565a9 |
| ba96c21f83 |
| 8494164b22 |
| 4075c384e6 |
| 0bcd1701f0 |
| ceb5509170 |
| 88243d2408 |
| 5f37d08761 |
| aa6857d22d |
16
.travis.yml
@@ -2,14 +2,12 @@ language: python
|
|||||||
sudo: true
|
sudo: true
|
||||||
python:
|
python:
|
||||||
- 2.7
|
- 2.7
|
||||||
- 3.3
|
|
||||||
- 3.4
|
- 3.4
|
||||||
- 3.5
|
- 3.5
|
||||||
- 3.6
|
- 3.6
|
||||||
|
|
||||||
env:
|
env:
|
||||||
- JEDI_TEST_ENVIRONMENT=27
|
- JEDI_TEST_ENVIRONMENT=27
|
||||||
- JEDI_TEST_ENVIRONMENT=33
|
|
||||||
- JEDI_TEST_ENVIRONMENT=34
|
- JEDI_TEST_ENVIRONMENT=34
|
||||||
- JEDI_TEST_ENVIRONMENT=35
|
- JEDI_TEST_ENVIRONMENT=35
|
||||||
- JEDI_TEST_ENVIRONMENT=36
|
- JEDI_TEST_ENVIRONMENT=36
|
||||||
@@ -24,9 +22,6 @@ matrix:
|
|||||||
allow_failures:
|
allow_failures:
|
||||||
- python: pypy
|
- python: pypy
|
||||||
- env: TOXENV=sith
|
- env: TOXENV=sith
|
||||||
- env:
|
|
||||||
- TOXENV=cov
|
|
||||||
- JEDI_TEST_ENVIRONMENT=36
|
|
||||||
- python: 3.7-dev
|
- python: 3.7-dev
|
||||||
include:
|
include:
|
||||||
- python: 3.6
|
- python: 3.6
|
||||||
@@ -43,7 +38,6 @@ before_install:
|
|||||||
- ./travis_install.sh
|
- ./travis_install.sh
|
||||||
# Need to add the path to the Python versions in the end. This might add
|
# Need to add the path to the Python versions in the end. This might add
|
||||||
# something twice, but it doesn't really matter, because they are appended.
|
# something twice, but it doesn't really matter, because they are appended.
|
||||||
- export PATH=$PATH:/opt/python/3.3/bin
|
|
||||||
- export PATH=$PATH:/opt/python/3.5/bin
|
- export PATH=$PATH:/opt/python/3.5/bin
|
||||||
# 3.6 was not installed manually, but already is on the system. However
|
# 3.6 was not installed manually, but already is on the system. However
|
||||||
# it's not on path (unless 3.6 is selected).
|
# it's not on path (unless 3.6 is selected).
|
||||||
@@ -53,7 +47,11 @@ install:
|
|||||||
script:
|
script:
|
||||||
- tox
|
- tox
|
||||||
after_script:
|
after_script:
|
||||||
- if [ $TOXENV == "cov" ]; then
|
- |
|
||||||
pip install --quiet coveralls;
|
if [ $TOXENV == "cov" ]; then
|
||||||
coveralls;
|
pip install --quiet codecov coveralls
|
||||||
|
coverage xml
|
||||||
|
coverage report -m
|
||||||
|
coveralls
|
||||||
|
bash <(curl -s https://codecov.io/bash) -X gcov -X coveragepy -X search -X fix -X xcode -f coverage.xml
|
||||||
fi
|
fi
|
||||||
AUTHORS.txt
@@ -50,5 +50,6 @@ Anton Zub (@zabulazza)
 Maksim Novikov (@m-novikov) <mnovikov.work@gmail.com>
 Tobias Rzepka (@TobiasRzepka)
 micbou (@micbou)
+Dima Gerasimov (@karlicoss) <karlicoss@gmail.com>
 
 Note: (@user) means a github user name.
CHANGELOG.rst
@@ -3,6 +3,41 @@
 Changelog
 ---------
 
+0.13.3 (2019-02-24)
++++++++++++++++++++++
+
+- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870
+
+0.13.2 (2018-12-15)
++++++++++++++++++++++
+
+- Fixed a bug that led to Jedi spawning a lot of subprocesses.
+
+0.13.1 (2018-10-02)
++++++++++++++++++++++
+
+- Bugfixes, because tensorflow completions were still slow.
+
+0.13.0 (2018-10-02)
++++++++++++++++++++++
+
+- A small release. Some bug fixes.
+- Remove Python 3.3 support. Python 3.3 support has been dropped by the Python
+  foundation.
+- Default environments are now using the same Python version as the Python
+  process. In 0.12.x, we used to load the latest Python version on the system.
+- Added ``include_builtins`` as a parameter to usages.
+- ``goto_assignments`` has a new ``follow_builtin_imports`` parameter that
+  changes the previous behavior slightly.
+
+0.12.1 (2018-06-30)
++++++++++++++++++++++
+
+- This release forces you to upgrade parso. If you don't, nothing will work
+  anymore. Otherwise changes should be limited to bug fixes. Unfortunately Jedi
+  still uses a few internals of parso that make it hard to keep compatibility
+  over multiple releases. Parso >=0.3.0 is going to be needed.
+
 0.12.0 (2018-04-15)
 +++++++++++++++++++
 
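The 0.13.0 notes above name two new parameters: ``include_builtins`` on ``usages`` and ``follow_builtin_imports`` on ``goto_assignments``. A minimal sketch of how they are meant to be called, using the pre-0.14 positional ``Script`` API that appears elsewhere in this diff (the sample source string is made up for illustration):

```python
import jedi

source = '''import json
json.loads'''

# Line 2, column at the end of "json.loads".
script = jedi.Script(source, 2, len('json.loads'), 'example.py')

# follow_builtin_imports only has an effect when follow_imports is True.
definitions = script.goto_assignments(follow_imports=True,
                                      follow_builtin_imports=True)
print(definitions)

# include_builtins=False filters out usages that live in builtin modules.
references = script.usages(include_builtins=False)
print(references)
```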
CONTRIBUTING.md
@@ -5,4 +5,4 @@ Pull Requests are great.
 3. Add your name to AUTHORS.txt
 4. Push to your fork and submit a pull request.
 
-**Try to use the PEP8 style guide.**
+**Try to use the PEP8 style guide** (and it's ok to have a line length of 100 characters).
README.rst
@@ -111,8 +111,8 @@ understands, see: `Features
 <https://jedi.readthedocs.org/en/latest/docs/features.html>`_. A list of
 caveats can be found on the same page.
 
-You can run Jedi on CPython 2.7 or 3.3+ but it should also
-understand/parse code older than those versions. Additonally you should be able
+You can run Jedi on CPython 2.7 or 3.4+ but it should also
+understand/parse code older than those versions. Additionally you should be able
 to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
 very well.
 
@@ -122,7 +122,7 @@ Tips on how to use Jedi efficiently can be found `here
 API
 ---
 
-You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/plugin-api.html>`_.
+You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.
 
 
 Autocompletion / Goto / Pydoc
28
appveyor.yml
@@ -3,9 +3,6 @@ environment:
|
|||||||
- TOXENV: py27
|
- TOXENV: py27
|
||||||
PYTHON_PATH: C:\Python27
|
PYTHON_PATH: C:\Python27
|
||||||
JEDI_TEST_ENVIRONMENT: 27
|
JEDI_TEST_ENVIRONMENT: 27
|
||||||
- TOXENV: py27
|
|
||||||
PYTHON_PATH: C:\Python27
|
|
||||||
JEDI_TEST_ENVIRONMENT: 33
|
|
||||||
- TOXENV: py27
|
- TOXENV: py27
|
||||||
PYTHON_PATH: C:\Python27
|
PYTHON_PATH: C:\Python27
|
||||||
JEDI_TEST_ENVIRONMENT: 34
|
JEDI_TEST_ENVIRONMENT: 34
|
||||||
@@ -16,28 +13,9 @@ environment:
|
|||||||
PYTHON_PATH: C:\Python27
|
PYTHON_PATH: C:\Python27
|
||||||
JEDI_TEST_ENVIRONMENT: 36
|
JEDI_TEST_ENVIRONMENT: 36
|
||||||
|
|
||||||
- TOXENV: py33
|
|
||||||
PYTHON_PATH: C:\Python33
|
|
||||||
JEDI_TEST_ENVIRONMENT: 27
|
|
||||||
- TOXENV: py33
|
|
||||||
PYTHON_PATH: C:\Python33
|
|
||||||
JEDI_TEST_ENVIRONMENT: 33
|
|
||||||
- TOXENV: py33
|
|
||||||
PYTHON_PATH: C:\Python33
|
|
||||||
JEDI_TEST_ENVIRONMENT: 34
|
|
||||||
- TOXENV: py33
|
|
||||||
PYTHON_PATH: C:\Python33
|
|
||||||
JEDI_TEST_ENVIRONMENT: 35
|
|
||||||
- TOXENV: py33
|
|
||||||
PYTHON_PATH: C:\Python33
|
|
||||||
JEDI_TEST_ENVIRONMENT: 36
|
|
||||||
|
|
||||||
- TOXENV: py34
|
- TOXENV: py34
|
||||||
PYTHON_PATH: C:\Python34
|
PYTHON_PATH: C:\Python34
|
||||||
JEDI_TEST_ENVIRONMENT: 27
|
JEDI_TEST_ENVIRONMENT: 27
|
||||||
- TOXENV: py34
|
|
||||||
PYTHON_PATH: C:\Python34
|
|
||||||
JEDI_TEST_ENVIRONMENT: 33
|
|
||||||
- TOXENV: py34
|
- TOXENV: py34
|
||||||
PYTHON_PATH: C:\Python34
|
PYTHON_PATH: C:\Python34
|
||||||
JEDI_TEST_ENVIRONMENT: 34
|
JEDI_TEST_ENVIRONMENT: 34
|
||||||
@@ -51,9 +29,6 @@ environment:
|
|||||||
- TOXENV: py35
|
- TOXENV: py35
|
||||||
PYTHON_PATH: C:\Python35
|
PYTHON_PATH: C:\Python35
|
||||||
JEDI_TEST_ENVIRONMENT: 27
|
JEDI_TEST_ENVIRONMENT: 27
|
||||||
- TOXENV: py35
|
|
||||||
PYTHON_PATH: C:\Python35
|
|
||||||
JEDI_TEST_ENVIRONMENT: 33
|
|
||||||
- TOXENV: py35
|
- TOXENV: py35
|
||||||
PYTHON_PATH: C:\Python35
|
PYTHON_PATH: C:\Python35
|
||||||
JEDI_TEST_ENVIRONMENT: 34
|
JEDI_TEST_ENVIRONMENT: 34
|
||||||
@@ -67,9 +42,6 @@ environment:
|
|||||||
- TOXENV: py36
|
- TOXENV: py36
|
||||||
PYTHON_PATH: C:\Python36
|
PYTHON_PATH: C:\Python36
|
||||||
JEDI_TEST_ENVIRONMENT: 27
|
JEDI_TEST_ENVIRONMENT: 27
|
||||||
- TOXENV: py36
|
|
||||||
PYTHON_PATH: C:\Python36
|
|
||||||
JEDI_TEST_ENVIRONMENT: 33
|
|
||||||
- TOXENV: py36
|
- TOXENV: py36
|
||||||
PYTHON_PATH: C:\Python36
|
PYTHON_PATH: C:\Python36
|
||||||
JEDI_TEST_ENVIRONMENT: 34
|
JEDI_TEST_ENVIRONMENT: 34
|
||||||
|
|||||||
11
conftest.py
@@ -6,7 +6,7 @@ from functools import partial
|
|||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
import jedi
|
import jedi
|
||||||
from jedi.api.environment import get_default_environment, get_system_environment
|
from jedi.api.environment import get_system_environment, InterpreterEnvironment
|
||||||
from jedi._compatibility import py_version
|
from jedi._compatibility import py_version
|
||||||
|
|
||||||
collect_ignore = [
|
collect_ignore = [
|
||||||
@@ -41,6 +41,9 @@ def pytest_addoption(parser):
|
|||||||
|
|
||||||
parser.addoption("--env", action='store',
|
parser.addoption("--env", action='store',
|
||||||
help="Execute the tests in that environment (e.g. 35 for python3.5).")
|
help="Execute the tests in that environment (e.g. 35 for python3.5).")
|
||||||
|
parser.addoption("--interpreter-env", "-I", action='store_true',
|
||||||
|
help="Don't use subprocesses to guarantee having safe "
|
||||||
|
"code execution. Useful for debugging.")
|
||||||
|
|
||||||
|
|
||||||
def pytest_configure(config):
|
def pytest_configure(config):
|
||||||
@@ -87,13 +90,13 @@ def clean_jedi_cache(request):
|
|||||||
|
|
||||||
@pytest.fixture(scope='session')
|
@pytest.fixture(scope='session')
|
||||||
def environment(request):
|
def environment(request):
|
||||||
|
if request.config.option.interpreter_env:
|
||||||
|
return InterpreterEnvironment()
|
||||||
|
|
||||||
version = request.config.option.env
|
version = request.config.option.env
|
||||||
if version is None:
|
if version is None:
|
||||||
version = os.environ.get('JEDI_TEST_ENVIRONMENT', str(py_version))
|
version = os.environ.get('JEDI_TEST_ENVIRONMENT', str(py_version))
|
||||||
|
|
||||||
if int(version) == py_version:
|
|
||||||
return get_default_environment()
|
|
||||||
|
|
||||||
return get_system_environment(version[0] + '.' + version[1:])
|
return get_system_environment(version[0] + '.' + version[1:])
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ make it work.
|
|||||||
General Features
|
General Features
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
- Python 2.7 and 3.3+ support
|
- Python 2.7 and 3.4+ support
|
||||||
- Ignores syntax errors and wrong indentation
|
- Ignores syntax errors and wrong indentation
|
||||||
- Can deal with complex module / function / class structures
|
- Can deal with complex module / function / class structures
|
||||||
- Great Virtualenv support
|
- Great Virtualenv support
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ example for the autocompletion feature:
|
|||||||
... datetime.da'''
|
... datetime.da'''
|
||||||
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
|
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
|
||||||
>>> script
|
>>> script
|
||||||
<Script: 'example.py'>
|
<Script: 'example.py' ...>
|
||||||
>>> completions = script.completions()
|
>>> completions = script.completions()
|
||||||
>>> completions #doctest: +ELLIPSIS
|
>>> completions #doctest: +ELLIPSIS
|
||||||
[<Completion: date>, <Completion: datetime>, ...]
|
[<Completion: date>, <Completion: datetime>, ...]
|
||||||
@@ -36,7 +36,7 @@ As you see Jedi is pretty simple and allows you to concentrate on writing a
|
|||||||
good text editor, while still having very good IDE features for Python.
|
good text editor, while still having very good IDE features for Python.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__version__ = '0.12.0'
|
__version__ = '0.13.3'
|
||||||
|
|
||||||
from jedi.api import Script, Interpreter, set_debug_function, \
|
from jedi.api import Script, Interpreter, set_debug_function, \
|
||||||
preload_module, names
|
preload_module, names
|
||||||
|
|||||||
@@ -2,7 +2,6 @@
|
|||||||
To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been
|
To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been
|
||||||
created. Clearly there is huge need to use conforming syntax.
|
created. Clearly there is huge need to use conforming syntax.
|
||||||
"""
|
"""
|
||||||
import binascii
|
|
||||||
import errno
|
import errno
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
@@ -17,8 +16,6 @@ except ImportError:
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
is_py3 = sys.version_info[0] >= 3
|
is_py3 = sys.version_info[0] >= 3
|
||||||
is_py33 = is_py3 and sys.version_info[1] >= 3
|
|
||||||
is_py34 = is_py3 and sys.version_info[1] >= 4
|
|
||||||
is_py35 = is_py3 and sys.version_info[1] >= 5
|
is_py35 = is_py3 and sys.version_info[1] >= 5
|
||||||
py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
|
py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
|
||||||
|
|
||||||
@@ -35,24 +32,36 @@ class DummyFile(object):
|
|||||||
del self.loader
|
del self.loader
|
||||||
|
|
||||||
|
|
||||||
def find_module_py34(string, path=None, full_name=None):
|
def find_module_py34(string, path=None, full_name=None, is_global_search=True):
|
||||||
spec = None
|
spec = None
|
||||||
loader = None
|
loader = None
|
||||||
|
|
||||||
spec = importlib.machinery.PathFinder.find_spec(string, path)
|
for finder in sys.meta_path:
|
||||||
if spec is not None:
|
if is_global_search and finder != importlib.machinery.PathFinder:
|
||||||
# We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs
|
p = None
|
||||||
if not spec.has_location:
|
else:
|
||||||
full_name = string if not path else full_name
|
p = path
|
||||||
implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
|
try:
|
||||||
return None, implicit_ns_info, False
|
find_spec = finder.find_spec
|
||||||
|
except AttributeError:
|
||||||
|
# These are old-school classes that still have a different API, just
|
||||||
|
# ignore those.
|
||||||
|
continue
|
||||||
|
|
||||||
|
spec = find_spec(string, p)
|
||||||
|
if spec is not None:
|
||||||
|
loader = spec.loader
|
||||||
|
if loader is None and not spec.has_location:
|
||||||
|
# This is a namespace package.
|
||||||
|
full_name = string if not path else full_name
|
||||||
|
implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
|
||||||
|
return None, implicit_ns_info, False
|
||||||
|
break
|
||||||
|
|
||||||
# we have found the tail end of the dotted path
|
|
||||||
loader = spec.loader
|
|
||||||
return find_module_py33(string, path, loader)
|
return find_module_py33(string, path, loader)
|
||||||
|
|
||||||
|
|
||||||
def find_module_py33(string, path=None, loader=None, full_name=None):
|
def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
|
||||||
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
|
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
|
||||||
|
|
||||||
if loader is None and path is None: # Fallback to find builtins
|
if loader is None and path is None: # Fallback to find builtins
|
||||||
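The rewritten ``find_module_py34`` above walks ``sys.meta_path`` instead of asking only ``importlib.machinery.PathFinder``, so custom finders get a chance to resolve a module. A simplified, standalone sketch of that lookup loop (it leaves out the namespace-package and loader handling the real function does):

```python
import sys
import importlib.machinery


def find_spec_like_jedi(name, path=None, is_global_search=True):
    """Walk sys.meta_path roughly the way the new find_module_py34 does."""
    for finder in sys.meta_path:
        if is_global_search and finder != importlib.machinery.PathFinder:
            p = None          # global search: most finders get no explicit path
        else:
            p = path
        try:
            find_spec = finder.find_spec
        except AttributeError:
            continue          # old-style finder without find_spec, skip it
        spec = find_spec(name, p)
        if spec is not None:
            return spec       # first finder that knows the module wins
    return None


print(find_spec_like_jedi('json'))  # -> ModuleSpec for the stdlib json module
```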
@@ -105,7 +114,7 @@ def find_module_py33(string, path=None, loader=None, full_name=None):
|
|||||||
return module_file, module_path, is_package
|
return module_file, module_path, is_package
|
||||||
|
|
||||||
|
|
||||||
def find_module_pre_py33(string, path=None, full_name=None):
|
def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True):
|
||||||
# This import is here, because in other places it will raise a
|
# This import is here, because in other places it will raise a
|
||||||
# DeprecationWarning.
|
# DeprecationWarning.
|
||||||
import imp
|
import imp
|
||||||
@@ -140,8 +149,7 @@ def find_module_pre_py33(string, path=None, full_name=None):
|
|||||||
raise ImportError("No module named {}".format(string))
|
raise ImportError("No module named {}".format(string))
|
||||||
|
|
||||||
|
|
||||||
find_module = find_module_py33 if is_py33 else find_module_pre_py33
|
find_module = find_module_py34 if is_py3 else find_module_pre_py34
|
||||||
find_module = find_module_py34 if is_py34 else find_module
|
|
||||||
find_module.__doc__ = """
|
find_module.__doc__ = """
|
||||||
Provides information about a module.
|
Provides information about a module.
|
||||||
|
|
||||||
@@ -208,6 +216,7 @@ def _iter_modules(paths, prefix=''):
|
|||||||
yield importer, prefix + modname, ispkg
|
yield importer, prefix + modname, ispkg
|
||||||
# END COPY
|
# END COPY
|
||||||
|
|
||||||
|
|
||||||
iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules
|
iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules
|
||||||
|
|
||||||
|
|
||||||
@@ -253,6 +262,7 @@ Usage::
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
class Python3Method(object):
|
class Python3Method(object):
|
||||||
def __init__(self, func):
|
def __init__(self, func):
|
||||||
self.func = func
|
self.func = func
|
||||||
@@ -313,10 +323,10 @@ def force_unicode(obj):
|
|||||||
try:
|
try:
|
||||||
import builtins # module name in python 3
|
import builtins # module name in python 3
|
||||||
except ImportError:
|
except ImportError:
|
||||||
import __builtin__ as builtins
|
import __builtin__ as builtins # noqa: F401
|
||||||
|
|
||||||
|
|
||||||
import ast
|
import ast # noqa: F401
|
||||||
|
|
||||||
|
|
||||||
def literal_eval(string):
|
def literal_eval(string):
|
||||||
@@ -326,7 +336,7 @@ def literal_eval(string):
|
|||||||
try:
|
try:
|
||||||
from itertools import zip_longest
|
from itertools import zip_longest
|
||||||
except ImportError:
|
except ImportError:
|
||||||
from itertools import izip_longest as zip_longest # Python 2
|
from itertools import izip_longest as zip_longest # Python 2 # noqa: F401
|
||||||
|
|
||||||
try:
|
try:
|
||||||
FileNotFoundError = FileNotFoundError
|
FileNotFoundError = FileNotFoundError
|
||||||
@@ -338,6 +348,11 @@ try:
|
|||||||
except NameError:
|
except NameError:
|
||||||
NotADirectoryError = IOError
|
NotADirectoryError = IOError
|
||||||
|
|
||||||
|
try:
|
||||||
|
PermissionError = PermissionError
|
||||||
|
except NameError:
|
||||||
|
PermissionError = IOError
|
||||||
|
|
||||||
|
|
||||||
def no_unicode_pprint(dct):
|
def no_unicode_pprint(dct):
|
||||||
"""
|
"""
|
||||||
@@ -356,6 +371,7 @@ def print_to_stderr(*args):
|
|||||||
eval("print(*args, file=sys.stderr)")
|
eval("print(*args, file=sys.stderr)")
|
||||||
else:
|
else:
|
||||||
print >> sys.stderr, args
|
print >> sys.stderr, args
|
||||||
|
sys.stderr.flush()
|
||||||
|
|
||||||
|
|
||||||
def utf8_repr(func):
|
def utf8_repr(func):
|
||||||
@@ -379,10 +395,14 @@ def utf8_repr(func):
|
|||||||
if is_py3:
|
if is_py3:
|
||||||
import queue
|
import queue
|
||||||
else:
|
else:
|
||||||
import Queue as queue
|
import Queue as queue # noqa: F401
|
||||||
|
|
||||||
|
try:
|
||||||
import pickle
|
# Attempt to load the C implementation of pickle on Python 2 as it is way
|
||||||
|
# faster.
|
||||||
|
import cPickle as pickle
|
||||||
|
except ImportError:
|
||||||
|
import pickle
|
||||||
if sys.version_info[:2] == (3, 3):
|
if sys.version_info[:2] == (3, 3):
|
||||||
"""
|
"""
|
||||||
Monkeypatch the unpickler in Python 3.3. This is needed, because the
|
Monkeypatch the unpickler in Python 3.3. This is needed, because the
|
||||||
@@ -443,53 +463,45 @@ if sys.version_info[:2] == (3, 3):
|
|||||||
pickle.loads = loads
|
pickle.loads = loads
|
||||||
|
|
||||||
|
|
||||||
_PICKLE_PROTOCOL = 2
|
|
||||||
is_windows = sys.platform == 'win32'
|
|
||||||
|
|
||||||
# The Windows shell on Python 2 consumes all control characters (below 32) and expand on
|
|
||||||
# all Python versions \n to \r\n.
|
|
||||||
# pickle starting from protocol version 1 uses binary data, which could not be escaped by
|
|
||||||
# any normal unicode encoder. Therefore, the only bytes encoder which doesn't produce
|
|
||||||
# control characters is binascii.hexlify.
|
|
||||||
|
|
||||||
|
|
||||||
def pickle_load(file):
|
def pickle_load(file):
|
||||||
if is_windows:
|
try:
|
||||||
try:
|
|
||||||
data = file.readline()
|
|
||||||
data = binascii.unhexlify(data.strip())
|
|
||||||
if is_py3:
|
|
||||||
return pickle.loads(data, encoding='bytes')
|
|
||||||
else:
|
|
||||||
return pickle.loads(data)
|
|
||||||
# Python on Windows don't throw EOF errors for pipes. So reraise them with
|
|
||||||
# the correct type, which is cought upwards.
|
|
||||||
except OSError:
|
|
||||||
raise EOFError()
|
|
||||||
else:
|
|
||||||
if is_py3:
|
if is_py3:
|
||||||
return pickle.load(file, encoding='bytes')
|
return pickle.load(file, encoding='bytes')
|
||||||
else:
|
return pickle.load(file)
|
||||||
return pickle.load(file)
|
# Python on Windows don't throw EOF errors for pipes. So reraise them with
|
||||||
|
# the correct type, which is caught upwards.
|
||||||
|
except OSError:
|
||||||
|
if sys.platform == 'win32':
|
||||||
|
raise EOFError()
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
def pickle_dump(data, file):
|
def pickle_dump(data, file, protocol):
|
||||||
if is_windows:
|
try:
|
||||||
try:
|
pickle.dump(data, file, protocol)
|
||||||
data = pickle.dumps(data, protocol=_PICKLE_PROTOCOL)
|
# On Python 3.3 flush throws sometimes an error even though the writing
|
||||||
data = binascii.hexlify(data)
|
# operation should be completed.
|
||||||
file.write(data)
|
|
||||||
file.write(b'\n')
|
|
||||||
# On Python 3.3 flush throws sometimes an error even if the two file writes
|
|
||||||
# should done it already before. This could be also computer / speed depending.
|
|
||||||
file.flush()
|
|
||||||
# Python on Windows don't throw EPIPE errors for pipes. So reraise them with
|
|
||||||
# the correct type and error number.
|
|
||||||
except OSError:
|
|
||||||
raise IOError(errno.EPIPE, "Broken pipe")
|
|
||||||
else:
|
|
||||||
pickle.dump(data, file, protocol=_PICKLE_PROTOCOL)
|
|
||||||
file.flush()
|
file.flush()
|
||||||
|
# Python on Windows don't throw EPIPE errors for pipes. So reraise them with
|
||||||
|
# the correct type and error number.
|
||||||
|
except OSError:
|
||||||
|
if sys.platform == 'win32':
|
||||||
|
raise IOError(errno.EPIPE, "Broken pipe")
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
# Determine the highest protocol version compatible for a given list of Python
|
||||||
|
# versions.
|
||||||
|
def highest_pickle_protocol(python_versions):
|
||||||
|
protocol = 4
|
||||||
|
for version in python_versions:
|
||||||
|
if version[0] == 2:
|
||||||
|
# The minimum protocol version for the versions of Python that we
|
||||||
|
# support (2.7 and 3.3+) is 2.
|
||||||
|
return 2
|
||||||
|
if version[1] < 4:
|
||||||
|
protocol = 3
|
||||||
|
return protocol
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
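``highest_pickle_protocol`` above picks the highest pickle protocol that every participating interpreter understands. A self-contained restatement of that logic with a few example inputs (the version tuples are illustrative):

```python
import sys


def highest_pickle_protocol(python_versions):
    # Same rule as the helper added above: protocol 2 if any participant is
    # Python 2, protocol 3 if any participant is older than 3.4, else 4.
    protocol = 4
    for version in python_versions:
        if version[0] == 2:
            return 2
        if version[1] < 4:
            protocol = 3
    return protocol


print(highest_pickle_protocol([sys.version_info, (2, 7, 15)]))  # -> 2
print(highest_pickle_protocol([(3, 6, 8), (3, 3, 0)]))          # -> 3
print(highest_pickle_protocol([(3, 6, 8), (3, 7, 2)]))          # -> 4
```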
@@ -512,4 +524,71 @@ class GeneralizedPopen(subprocess.Popen):
|
|||||||
except AttributeError:
|
except AttributeError:
|
||||||
CREATE_NO_WINDOW = 0x08000000
|
CREATE_NO_WINDOW = 0x08000000
|
||||||
kwargs['creationflags'] = CREATE_NO_WINDOW
|
kwargs['creationflags'] = CREATE_NO_WINDOW
|
||||||
|
# The child process doesn't need file descriptors except 0, 1, 2.
|
||||||
|
# This is unix only.
|
||||||
|
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
|
||||||
super(GeneralizedPopen, self).__init__(*args, **kwargs)
|
super(GeneralizedPopen, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# shutil.which is not available on Python 2.7.
|
||||||
|
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
|
||||||
|
"""Given a command, mode, and a PATH string, return the path which
|
||||||
|
conforms to the given mode on the PATH, or None if there is no such
|
||||||
|
file.
|
||||||
|
|
||||||
|
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
|
||||||
|
of os.environ.get("PATH"), or can be overridden with a custom search
|
||||||
|
path.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Check that a given file can be accessed with the correct mode.
|
||||||
|
# Additionally check that `file` is not a directory, as on Windows
|
||||||
|
# directories pass the os.access check.
|
||||||
|
def _access_check(fn, mode):
|
||||||
|
return (os.path.exists(fn) and os.access(fn, mode)
|
||||||
|
and not os.path.isdir(fn))
|
||||||
|
|
||||||
|
# If we're given a path with a directory part, look it up directly rather
|
||||||
|
# than referring to PATH directories. This includes checking relative to the
|
||||||
|
# current directory, e.g. ./script
|
||||||
|
if os.path.dirname(cmd):
|
||||||
|
if _access_check(cmd, mode):
|
||||||
|
return cmd
|
||||||
|
return None
|
||||||
|
|
||||||
|
if path is None:
|
||||||
|
path = os.environ.get("PATH", os.defpath)
|
||||||
|
if not path:
|
||||||
|
return None
|
||||||
|
path = path.split(os.pathsep)
|
||||||
|
|
||||||
|
if sys.platform == "win32":
|
||||||
|
# The current directory takes precedence on Windows.
|
||||||
|
if os.curdir not in path:
|
||||||
|
path.insert(0, os.curdir)
|
||||||
|
|
||||||
|
# PATHEXT is necessary to check on Windows.
|
||||||
|
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
|
||||||
|
# See if the given file matches any of the expected path extensions.
|
||||||
|
# This will allow us to short circuit when given "python.exe".
|
||||||
|
# If it does match, only test that one, otherwise we have to try
|
||||||
|
# others.
|
||||||
|
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
|
||||||
|
files = [cmd]
|
||||||
|
else:
|
||||||
|
files = [cmd + ext for ext in pathext]
|
||||||
|
else:
|
||||||
|
# On other platforms you don't have things like PATHEXT to tell you
|
||||||
|
# what file suffixes are executable, so just pass on cmd as-is.
|
||||||
|
files = [cmd]
|
||||||
|
|
||||||
|
seen = set()
|
||||||
|
for dir in path:
|
||||||
|
normdir = os.path.normcase(dir)
|
||||||
|
if normdir not in seen:
|
||||||
|
seen.add(normdir)
|
||||||
|
for thefile in files:
|
||||||
|
name = os.path.join(dir, thefile)
|
||||||
|
if _access_check(name, mode):
|
||||||
|
return name
|
||||||
|
return None
|
||||||
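The ``which`` helper above backports ``shutil.which`` for Python 2.7. A small sanity-check sketch of how it is expected to behave on Python 3, where the stdlib version is available for comparison (``jedi._compatibility`` is a private module, so this is only illustrative):

```python
import shutil
from jedi._compatibility import which  # private helper added in this change

# On Python 3 the backport should agree with the stdlib implementation.
print(which('python'))
print(shutil.which('python'))
```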
|
|||||||
@@ -11,6 +11,7 @@ arguments.
|
|||||||
"""
|
"""
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
import warnings
|
||||||
|
|
||||||
import parso
|
import parso
|
||||||
from parso.python import tree
|
from parso.python import tree
|
||||||
@@ -74,9 +75,6 @@ class Script(object):
|
|||||||
:param encoding: The encoding of ``source``, if it is not a
|
:param encoding: The encoding of ``source``, if it is not a
|
||||||
``unicode`` object (default ``'utf-8'``).
|
``unicode`` object (default ``'utf-8'``).
|
||||||
:type encoding: str
|
:type encoding: str
|
||||||
:param source_encoding: The encoding of ``source``, if it is not a
|
|
||||||
``unicode`` object (default ``'utf-8'``).
|
|
||||||
:type encoding: str
|
|
||||||
:param sys_path: ``sys.path`` to use during analysis of the script
|
:param sys_path: ``sys.path`` to use during analysis of the script
|
||||||
:type sys_path: list
|
:type sys_path: list
|
||||||
:param environment: TODO
|
:param environment: TODO
|
||||||
@@ -114,9 +112,10 @@ class Script(object):
|
|||||||
self._module_node, source = self._evaluator.parse_and_get_code(
|
self._module_node, source = self._evaluator.parse_and_get_code(
|
||||||
code=source,
|
code=source,
|
||||||
path=self.path,
|
path=self.path,
|
||||||
|
encoding=encoding,
|
||||||
cache=False, # No disk cache, because the current script often changes.
|
cache=False, # No disk cache, because the current script often changes.
|
||||||
diff_cache=True,
|
diff_cache=settings.fast_parser,
|
||||||
cache_path=settings.cache_directory
|
cache_path=settings.cache_directory,
|
||||||
)
|
)
|
||||||
debug.speed('parsed')
|
debug.speed('parsed')
|
||||||
self._code_lines = parso.split_lines(source, keepends=True)
|
self._code_lines = parso.split_lines(source, keepends=True)
|
||||||
@@ -134,7 +133,9 @@ class Script(object):
|
|||||||
|
|
||||||
column = line_len if column is None else column
|
column = line_len if column is None else column
|
||||||
if not (0 <= column <= line_len):
|
if not (0 <= column <= line_len):
|
||||||
raise ValueError('`column` parameter is not in a valid range.')
|
raise ValueError('`column` parameter (%d) is not in a valid range '
|
||||||
|
'(0-%d) for line %d (%r).' % (
|
||||||
|
column, line_len, line, line_string))
|
||||||
self._pos = line, column
|
self._pos = line, column
|
||||||
self._path = path
|
self._path = path
|
||||||
|
|
||||||
@@ -144,9 +145,9 @@ class Script(object):
|
|||||||
def _get_module(self):
|
def _get_module(self):
|
||||||
name = '__main__'
|
name = '__main__'
|
||||||
if self.path is not None:
|
if self.path is not None:
|
||||||
n = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path)
|
import_names = dotted_path_in_sys_path(self._evaluator.get_sys_path(), self.path)
|
||||||
if n is not None:
|
if import_names is not None:
|
||||||
name = n
|
name = '.'.join(import_names)
|
||||||
|
|
||||||
module = ModuleContext(
|
module = ModuleContext(
|
||||||
self._evaluator, self._module_node, self.path,
|
self._evaluator, self._module_node, self.path,
|
||||||
@@ -156,7 +157,11 @@ class Script(object):
|
|||||||
return module
|
return module
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return '<%s: %s>' % (self.__class__.__name__, repr(self._orig_path))
|
return '<%s: %s %r>' % (
|
||||||
|
self.__class__.__name__,
|
||||||
|
repr(self._orig_path),
|
||||||
|
self._evaluator.environment,
|
||||||
|
)
|
||||||
|
|
||||||
def completions(self):
|
def completions(self):
|
||||||
"""
|
"""
|
||||||
@@ -172,6 +177,24 @@ class Script(object):
|
|||||||
self._pos, self.call_signatures
|
self._pos, self.call_signatures
|
||||||
)
|
)
|
||||||
completions = completion.completions()
|
completions = completion.completions()
|
||||||
|
|
||||||
|
def iter_import_completions():
|
||||||
|
for c in completions:
|
||||||
|
tree_name = c._name.tree_name
|
||||||
|
if tree_name is None:
|
||||||
|
continue
|
||||||
|
definition = tree_name.get_definition()
|
||||||
|
if definition is not None \
|
||||||
|
and definition.type in ('import_name', 'import_from'):
|
||||||
|
yield c
|
||||||
|
|
||||||
|
if len(list(iter_import_completions())) > 10:
|
||||||
|
# For now disable completions if there's a lot of imports that
|
||||||
|
# might potentially be resolved. This is the case for tensorflow
|
||||||
|
# and has been fixed for it. This is obviously temporary until we
|
||||||
|
# have a better solution.
|
||||||
|
self._evaluator.infer_enabled = False
|
||||||
|
|
||||||
debug.speed('completions end')
|
debug.speed('completions end')
|
||||||
return completions
|
return completions
|
||||||
|
|
||||||
@@ -203,20 +226,33 @@ class Script(object):
|
|||||||
# the API.
|
# the API.
|
||||||
return helpers.sorted_definitions(set(defs))
|
return helpers.sorted_definitions(set(defs))
|
||||||
|
|
||||||
def goto_assignments(self, follow_imports=False):
|
def goto_assignments(self, follow_imports=False, follow_builtin_imports=False):
|
||||||
"""
|
"""
|
||||||
Return the first definition found, while optionally following imports.
|
Return the first definition found, while optionally following imports.
|
||||||
Multiple objects may be returned, because Python itself is a
|
Multiple objects may be returned, because Python itself is a
|
||||||
dynamic language, which means depending on an option you can have two
|
dynamic language, which means depending on an option you can have two
|
||||||
different versions of a function.
|
different versions of a function.
|
||||||
|
|
||||||
|
:param follow_imports: The goto call will follow imports.
|
||||||
|
:param follow_builtin_imports: If follow_imports is True will decide if
|
||||||
|
it follows builtin imports.
|
||||||
:rtype: list of :class:`classes.Definition`
|
:rtype: list of :class:`classes.Definition`
|
||||||
"""
|
"""
|
||||||
def filter_follow_imports(names, check):
|
def filter_follow_imports(names, check):
|
||||||
for name in names:
|
for name in names:
|
||||||
if check(name):
|
if check(name):
|
||||||
for result in filter_follow_imports(name.goto(), check):
|
new_names = list(filter_follow_imports(name.goto(), check))
|
||||||
yield result
|
found_builtin = False
|
||||||
|
if follow_builtin_imports:
|
||||||
|
for new_name in new_names:
|
||||||
|
if new_name.start_pos is None:
|
||||||
|
found_builtin = True
|
||||||
|
|
||||||
|
if found_builtin and not isinstance(name, imports.SubModuleName):
|
||||||
|
yield name
|
||||||
|
else:
|
||||||
|
for new_name in new_names:
|
||||||
|
yield new_name
|
||||||
else:
|
else:
|
||||||
yield name
|
yield name
|
||||||
|
|
||||||
@@ -238,7 +274,7 @@ class Script(object):
|
|||||||
defs = [classes.Definition(self._evaluator, d) for d in set(names)]
|
defs = [classes.Definition(self._evaluator, d) for d in set(names)]
|
||||||
return helpers.sorted_definitions(defs)
|
return helpers.sorted_definitions(defs)
|
||||||
|
|
||||||
def usages(self, additional_module_paths=()):
|
def usages(self, additional_module_paths=(), **kwargs):
|
||||||
"""
|
"""
|
||||||
Return :class:`classes.Definition` objects, which contain all
|
Return :class:`classes.Definition` objects, which contain all
|
||||||
names that point to the definition of the name under the cursor. This
|
names that point to the definition of the name under the cursor. This
|
||||||
@@ -247,17 +283,31 @@ class Script(object):
|
|||||||
|
|
||||||
.. todo:: Implement additional_module_paths
|
.. todo:: Implement additional_module_paths
|
||||||
|
|
||||||
|
:param additional_module_paths: Deprecated, never ever worked.
|
||||||
|
:param include_builtins: Default True, checks if a usage is a builtin
|
||||||
|
(e.g. ``sys``) and in that case does not return it.
|
||||||
:rtype: list of :class:`classes.Definition`
|
:rtype: list of :class:`classes.Definition`
|
||||||
"""
|
"""
|
||||||
tree_name = self._module_node.get_name_of_position(self._pos)
|
if additional_module_paths:
|
||||||
if tree_name is None:
|
warnings.warn(
|
||||||
# Must be syntax
|
"Deprecated since version 0.12.0. This never even worked, just ignore it.",
|
||||||
return []
|
DeprecationWarning,
|
||||||
|
stacklevel=2
|
||||||
|
)
|
||||||
|
|
||||||
names = usages.usages(self._get_module(), tree_name)
|
def _usages(include_builtins=True):
|
||||||
|
tree_name = self._module_node.get_name_of_position(self._pos)
|
||||||
|
if tree_name is None:
|
||||||
|
# Must be syntax
|
||||||
|
return []
|
||||||
|
|
||||||
definitions = [classes.Definition(self._evaluator, n) for n in names]
|
names = usages.usages(self._get_module(), tree_name)
|
||||||
return helpers.sorted_definitions(definitions)
|
|
||||||
|
definitions = [classes.Definition(self._evaluator, n) for n in names]
|
||||||
|
if not include_builtins:
|
||||||
|
definitions = [d for d in definitions if not d.in_builtin_module()]
|
||||||
|
return helpers.sorted_definitions(definitions)
|
||||||
|
return _usages(**kwargs)
|
||||||
|
|
||||||
def call_signatures(self):
|
def call_signatures(self):
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ from jedi.evaluate import imports
|
|||||||
from jedi.evaluate import compiled
|
from jedi.evaluate import compiled
|
||||||
from jedi.evaluate.imports import ImportName
|
from jedi.evaluate.imports import ImportName
|
||||||
from jedi.evaluate.context import instance
|
from jedi.evaluate.context import instance
|
||||||
from jedi.evaluate.context import ClassContext, FunctionContext, FunctionExecutionContext
|
from jedi.evaluate.context import ClassContext, FunctionExecutionContext
|
||||||
from jedi.api.keywords import KeywordName
|
from jedi.api.keywords import KeywordName
|
||||||
|
|
||||||
|
|
||||||
@@ -342,7 +342,7 @@ class BaseDefinition(object):
|
|||||||
|
|
||||||
followed = list(self._name.infer())
|
followed = list(self._name.infer())
|
||||||
if not followed or not hasattr(followed[0], 'py__call__'):
|
if not followed or not hasattr(followed[0], 'py__call__'):
|
||||||
raise AttributeError()
|
raise AttributeError('There are no params defined on this.')
|
||||||
context = followed[0] # only check the first one.
|
context = followed[0] # only check the first one.
|
||||||
|
|
||||||
return [Definition(self._evaluator, n) for n in get_param_names(context)]
|
return [Definition(self._evaluator, n) for n in get_param_names(context)]
|
||||||
@@ -353,10 +353,7 @@ class BaseDefinition(object):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
if isinstance(context, FunctionExecutionContext):
|
if isinstance(context, FunctionExecutionContext):
|
||||||
# TODO the function context should be a part of the function
|
context = context.function_context
|
||||||
# execution context.
|
|
||||||
context = FunctionContext(
|
|
||||||
self._evaluator, context.parent_context, context.tree_node)
|
|
||||||
return Definition(self._evaluator, context.name)
|
return Definition(self._evaluator, context.name)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
@@ -404,8 +401,9 @@ class Completion(BaseDefinition):
|
|||||||
append = '('
|
append = '('
|
||||||
|
|
||||||
if self._name.api_type == 'param' and self._stack is not None:
|
if self._name.api_type == 'param' and self._stack is not None:
|
||||||
node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar))
|
nonterminals = [stack_node.nonterminal for stack_node in self._stack]
|
||||||
if 'trailer' in node_names and 'argument' not in node_names:
|
if 'trailer' in nonterminals and 'argument' not in nonterminals:
|
||||||
|
# TODO this doesn't work for nested calls.
|
||||||
append += '='
|
append += '='
|
||||||
|
|
||||||
name = self._name.string_name
|
name = self._name.string_name
|
||||||
@@ -535,9 +533,9 @@ class Definition(BaseDefinition):
|
|||||||
# here.
|
# here.
|
||||||
txt = definition.get_code(include_prefix=False)
|
txt = definition.get_code(include_prefix=False)
|
||||||
# Delete comments:
|
# Delete comments:
|
||||||
txt = re.sub('#[^\n]+\n', ' ', txt)
|
txt = re.sub(r'#[^\n]+\n', ' ', txt)
|
||||||
# Delete multi spaces/newlines
|
# Delete multi spaces/newlines
|
||||||
txt = re.sub('\s+', ' ', txt).strip()
|
txt = re.sub(r'\s+', ' ', txt).strip()
|
||||||
return txt
|
return txt
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@@ -637,9 +635,18 @@ class CallSignature(Definition):
|
|||||||
"""
|
"""
|
||||||
return self._bracket_start_pos
|
return self._bracket_start_pos
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _params_str(self):
|
||||||
|
return ', '.join([p.description[6:]
|
||||||
|
for p in self.params])
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return '<%s: %s index %s>' % \
|
return '<%s: %s index=%r params=[%s]>' % (
|
||||||
(type(self).__name__, self._name.string_name, self.index)
|
type(self).__name__,
|
||||||
|
self._name.string_name,
|
||||||
|
self._index,
|
||||||
|
self._params_str,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class _Help(object):
|
class _Help(object):
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
from parso.python import token
|
from parso.python.token import PythonTokenTypes
|
||||||
from parso.python import tree
|
from parso.python import tree
|
||||||
from parso.tree import search_ancestor, Leaf
|
from parso.tree import search_ancestor, Leaf
|
||||||
|
|
||||||
@@ -57,7 +57,8 @@ def get_user_scope(module_context, position):
|
|||||||
def scan(scope):
|
def scan(scope):
|
||||||
for s in scope.children:
|
for s in scope.children:
|
||||||
if s.start_pos <= position <= s.end_pos:
|
if s.start_pos <= position <= s.end_pos:
|
||||||
if isinstance(s, (tree.Scope, tree.Flow)):
|
if isinstance(s, (tree.Scope, tree.Flow)) \
|
||||||
|
or s.type in ('async_stmt', 'async_funcdef'):
|
||||||
return scan(s) or s
|
return scan(s) or s
|
||||||
elif s.type in ('suite', 'decorated'):
|
elif s.type in ('suite', 'decorated'):
|
||||||
return scan(s)
|
return scan(s)
|
||||||
@@ -121,11 +122,11 @@ class Completion:
|
|||||||
grammar = self._evaluator.grammar
|
grammar = self._evaluator.grammar
|
||||||
|
|
||||||
try:
|
try:
|
||||||
self.stack = helpers.get_stack_at_position(
|
self.stack = stack = helpers.get_stack_at_position(
|
||||||
grammar, self._code_lines, self._module_node, self._position
|
grammar, self._code_lines, self._module_node, self._position
|
||||||
)
|
)
|
||||||
except helpers.OnErrorLeaf as e:
|
except helpers.OnErrorLeaf as e:
|
||||||
self.stack = None
|
self.stack = stack = None
|
||||||
if e.error_leaf.value == '.':
|
if e.error_leaf.value == '.':
|
||||||
# After ErrorLeaf's that are dots, we will not do any
|
# After ErrorLeaf's that are dots, we will not do any
|
||||||
# completions since this probably just confuses the user.
|
# completions since this probably just confuses the user.
|
||||||
@@ -134,10 +135,10 @@ class Completion:
|
|||||||
|
|
||||||
return self._global_completions()
|
return self._global_completions()
|
||||||
|
|
||||||
allowed_keywords, allowed_tokens = \
|
allowed_transitions = \
|
||||||
helpers.get_possible_completion_types(grammar._pgen_grammar, self.stack)
|
list(stack._allowed_transition_names_and_token_types())
|
||||||
|
|
||||||
if 'if' in allowed_keywords:
|
if 'if' in allowed_transitions:
|
||||||
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
|
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
|
||||||
previous_leaf = leaf.get_previous_leaf()
|
previous_leaf = leaf.get_previous_leaf()
|
||||||
|
|
||||||
@@ -163,50 +164,52 @@ class Completion:
|
|||||||
# Compare indents
|
# Compare indents
|
||||||
if stmt.start_pos[1] == indent:
|
if stmt.start_pos[1] == indent:
|
||||||
if type_ == 'if_stmt':
|
if type_ == 'if_stmt':
|
||||||
allowed_keywords += ['elif', 'else']
|
allowed_transitions += ['elif', 'else']
|
||||||
elif type_ == 'try_stmt':
|
elif type_ == 'try_stmt':
|
||||||
allowed_keywords += ['except', 'finally', 'else']
|
allowed_transitions += ['except', 'finally', 'else']
|
||||||
elif type_ == 'for_stmt':
|
elif type_ == 'for_stmt':
|
||||||
allowed_keywords.append('else')
|
allowed_transitions.append('else')
|
||||||
|
|
||||||
completion_names = list(self._get_keyword_completion_names(allowed_keywords))
|
completion_names = list(self._get_keyword_completion_names(allowed_transitions))
|
||||||
|
|
||||||
if token.NAME in allowed_tokens or token.INDENT in allowed_tokens:
|
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
|
||||||
|
PythonTokenTypes.INDENT)):
|
||||||
# This means that we actually have to do type inference.
|
# This means that we actually have to do type inference.
|
||||||
|
|
||||||
symbol_names = list(self.stack.get_node_names(grammar._pgen_grammar))
|
nonterminals = [stack_node.nonterminal for stack_node in stack]
|
||||||
|
|
||||||
nodes = list(self.stack.get_nodes())
|
nodes = [node for stack_node in stack for node in stack_node.nodes]
|
||||||
|
|
||||||
if nodes and nodes[-1] in ('as', 'def', 'class'):
|
if nodes and nodes[-1] in ('as', 'def', 'class'):
|
||||||
# No completions for ``with x as foo`` and ``import x as foo``.
|
# No completions for ``with x as foo`` and ``import x as foo``.
|
||||||
# Also true for defining names as a class or function.
|
# Also true for defining names as a class or function.
|
||||||
return list(self._get_class_context_completions(is_function=True))
|
return list(self._get_class_context_completions(is_function=True))
|
||||||
elif "import_stmt" in symbol_names:
|
elif "import_stmt" in nonterminals:
|
||||||
level, names = self._parse_dotted_names(nodes, "import_from" in symbol_names)
|
level, names = self._parse_dotted_names(nodes, "import_from" in nonterminals)
|
||||||
|
|
||||||
only_modules = not ("import_from" in symbol_names and 'import' in nodes)
|
only_modules = not ("import_from" in nonterminals and 'import' in nodes)
|
||||||
completion_names += self._get_importer_names(
|
completion_names += self._get_importer_names(
|
||||||
names,
|
names,
|
||||||
level,
|
level,
|
||||||
only_modules=only_modules,
|
only_modules=only_modules,
|
||||||
)
|
)
|
||||||
elif symbol_names[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
|
elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
|
||||||
dot = self._module_node.get_leaf_for_position(self._position)
|
dot = self._module_node.get_leaf_for_position(self._position)
|
||||||
completion_names += self._trailer_completions(dot.get_previous_leaf())
|
completion_names += self._trailer_completions(dot.get_previous_leaf())
|
||||||
else:
|
else:
|
||||||
completion_names += self._global_completions()
|
completion_names += self._global_completions()
|
||||||
completion_names += self._get_class_context_completions(is_function=False)
|
completion_names += self._get_class_context_completions(is_function=False)
|
||||||
|
|
||||||
if 'trailer' in symbol_names:
|
if 'trailer' in nonterminals:
|
||||||
call_signatures = self._call_signatures_method()
|
call_signatures = self._call_signatures_method()
|
||||||
completion_names += get_call_signature_param_names(call_signatures)
|
completion_names += get_call_signature_param_names(call_signatures)
|
||||||
|
|
||||||
return completion_names
|
return completion_names
|
||||||
|
|
||||||
def _get_keyword_completion_names(self, keywords_):
|
def _get_keyword_completion_names(self, allowed_transitions):
|
||||||
for k in keywords_:
|
for k in allowed_transitions:
|
||||||
yield keywords.KeywordName(self._evaluator, k)
|
if isinstance(k, str) and k.isalpha():
|
||||||
|
yield keywords.KeywordName(self._evaluator, k)
|
||||||
|
|
||||||
def _global_completions(self):
|
def _global_completions(self):
|
||||||
context = get_user_scope(self._module_context, self._position)
|
context = get_user_scope(self._module_context, self._position)
|
||||||
|
|||||||
@@ -3,26 +3,21 @@ Environments are a way to activate different Python versions or Virtualenvs for
|
|||||||
static analysis. The Python binary in that environment is going to be executed.
|
static analysis. The Python binary in that environment is going to be executed.
|
||||||
"""
|
"""
|
||||||
import os
|
import os
|
||||||
import re
|
|
||||||
import sys
|
import sys
|
||||||
import hashlib
|
import hashlib
|
||||||
import filecmp
|
import filecmp
|
||||||
from subprocess import PIPE
|
|
||||||
from collections import namedtuple
|
from collections import namedtuple
|
||||||
# When dropping Python 2.7 support we should consider switching to
|
|
||||||
# `shutil.which`.
|
|
||||||
from distutils.spawn import find_executable
|
|
||||||
|
|
||||||
from jedi._compatibility import GeneralizedPopen
|
from jedi._compatibility import highest_pickle_protocol, which
|
||||||
from jedi.cache import memoize_method, time_cache
|
from jedi.cache import memoize_method, time_cache
|
||||||
from jedi.evaluate.compiled.subprocess import get_subprocess, \
|
from jedi.evaluate.compiled.subprocess import CompiledSubprocess, \
|
||||||
EvaluatorSameProcess, EvaluatorSubprocess
|
EvaluatorSameProcess, EvaluatorSubprocess
|
||||||
|
|
||||||
import parso
|
import parso
|
||||||
|
|
||||||
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
|
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
|
||||||
|
|
||||||
_SUPPORTED_PYTHONS = ['3.6', '3.5', '3.4', '3.3', '2.7']
|
_SUPPORTED_PYTHONS = ['3.7', '3.6', '3.5', '3.4', '3.3', '2.7']
|
||||||
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
|
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
|
||||||
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
|
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
|
||||||
|
|
||||||
@@ -49,46 +44,66 @@ class _BaseEnvironment(object):
|
|||||||
return self._hash
|
return self._hash
|
||||||
|
|
||||||
|
|
||||||
|
def _get_info():
|
||||||
|
return (
|
||||||
|
sys.executable,
|
||||||
|
sys.prefix,
|
||||||
|
sys.version_info[:3],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class Environment(_BaseEnvironment):
|
class Environment(_BaseEnvironment):
|
||||||
"""
|
"""
|
||||||
This class is supposed to be created by internal Jedi architecture. You
|
This class is supposed to be created by internal Jedi architecture. You
|
||||||
should not create it directly. Please use create_environment or the other
|
should not create it directly. Please use create_environment or the other
|
||||||
functions instead. It is then returned by that function.
|
functions instead. It is then returned by that function.
|
||||||
"""
|
"""
|
||||||
def __init__(self, path, executable):
|
_subprocess = None
|
||||||
self.path = os.path.abspath(path)
|
|
||||||
"""
|
def __init__(self, executable):
|
||||||
The path to an environment, matches ``sys.prefix``.
|
self._start_executable = executable
|
||||||
"""
|
# Initialize the environment
|
||||||
self.executable = os.path.abspath(executable)
|
self._get_subprocess()
|
||||||
|
|
||||||
|
def _get_subprocess(self):
|
||||||
|
if self._subprocess is not None and not self._subprocess.is_crashed:
|
||||||
|
return self._subprocess
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._subprocess = CompiledSubprocess(self._start_executable)
|
||||||
|
info = self._subprocess._send(None, _get_info)
|
||||||
|
except Exception as exc:
|
||||||
|
raise InvalidPythonEnvironment(
|
||||||
|
"Could not get version information for %r: %r" % (
|
||||||
|
self._start_executable,
|
||||||
|
exc))
|
||||||
|
|
||||||
|
# Since it could change and might not be the same(?) as the one given,
|
||||||
|
# set it here.
|
||||||
|
self.executable = info[0]
|
||||||
"""
|
"""
|
||||||
        The Python executable, matches ``sys.executable``.
        """
-        self.version_info = self._get_version()
+        self.path = info[1]
+        """
+        The path to an environment, matches ``sys.prefix``.
+        """
+        self.version_info = _VersionInfo(*info[2])
        """

        Like ``sys.version_info``. A tuple to show the current Environment's
        Python version.
        """

-    def _get_version(self):
-        try:
-            process = GeneralizedPopen([self.executable, '--version'], stdout=PIPE, stderr=PIPE)
-            stdout, stderr = process.communicate()
-            retcode = process.poll()
-            if retcode:
-                raise InvalidPythonEnvironment()
-        except OSError:
-            raise InvalidPythonEnvironment()
-
-        # Until Python 3.4 wthe version string is part of stderr, after that
-        # stdout.
-        output = stdout + stderr
-        match = re.match(br'Python (\d+)\.(\d+)\.(\d+)', output)
-        if match is None:
-            raise InvalidPythonEnvironment("--version not working")
-
-        return _VersionInfo(*[int(m) for m in match.groups()])
+        # py2 sends bytes via pickle apparently?!
+        if self.version_info.major == 2:
+            self.executable = self.executable.decode()
+            self.path = self.path.decode()
+
+        # Adjust pickle protocol according to host and client version.
+        self._subprocess._pickle_protocol = highest_pickle_protocol([
+            sys.version_info, self.version_info])
+
+        return self._subprocess

    def __repr__(self):
        version = '.'.join(str(i) for i in self.version_info)
@@ -97,9 +112,6 @@ class Environment(_BaseEnvironment):
    def get_evaluator_subprocess(self, evaluator):
        return EvaluatorSubprocess(evaluator, self._get_subprocess())

-    def _get_subprocess(self):
-        return get_subprocess(self.executable)
-
    @memoize_method
    def get_sys_path(self):
        """
@@ -118,10 +130,9 @@ class Environment(_BaseEnvironment):

class SameEnvironment(Environment):
    def __init__(self):
-        super(SameEnvironment, self).__init__(sys.prefix, sys.executable)
-
-    def _get_version(self):
-        return _VersionInfo(*sys.version_info[:3])
+        self._start_executable = self.executable = sys.executable
+        self.path = sys.prefix
+        self.version_info = _VersionInfo(*sys.version_info[:3])


class InterpreterEnvironment(_BaseEnvironment):
@@ -136,13 +147,18 @@ class InterpreterEnvironment(_BaseEnvironment):


def _get_virtual_env_from_var():
+    """Get virtualenv environment from VIRTUAL_ENV environment variable.
+
+    It uses `safe=False` with ``create_environment``, because the environment
+    variable is considered to be safe / controlled by the user solely.
+    """
    var = os.environ.get('VIRTUAL_ENV')
-    if var is not None:
+    if var:
        if var == sys.prefix:
-            return SameEnvironment()
+            return _try_get_same_env()

        try:
-            return create_environment(var)
+            return create_environment(var, safe=False)
        except InvalidPythonEnvironment:
            pass

@@ -168,16 +184,60 @@ def get_default_environment():
    if virtual_env is not None:
        return virtual_env

-    for environment in find_system_environments():
-        return environment
-
-    # If no Python Environment is found, use the environment we're already
-    # using.
-    return SameEnvironment()
+    return _try_get_same_env()
+
+
+def _try_get_same_env():
+    env = SameEnvironment()
+    if not os.path.basename(env.executable).lower().startswith('python'):
+        # This tries to counter issues with embedding. In some cases (e.g.
+        # VIM's Python Mac/Windows, sys.executable is /foo/bar/vim. This
+        # happens, because for Mac a function called `_NSGetExecutablePath` is
+        # used and for Windows `GetModuleFileNameW`. These are both platform
+        # specific functions. For all other systems sys.executable should be
+        # alright. However here we try to generalize:
+        #
+        # 1. Check if the executable looks like python (heuristic)
+        # 2. In case it's not try to find the executable
+        # 3. In case we don't find it use an interpreter environment.
+        #
+        # The last option will always work, but leads to potential crashes of
+        # Jedi - which is ok, because it happens very rarely and even less,
+        # because the code below should work for most cases.
+        if os.name == 'nt':
+            # The first case would be a virtualenv and the second a normal
+            # Python installation.
+            checks = (r'Scripts\python.exe', 'python.exe')
+        else:
+            # For unix it looks like Python is always in a bin folder.
+            checks = (
+                'bin/python%s.%s' % (sys.version_info[0], sys.version[1]),
+                'bin/python%s' % (sys.version_info[0]),
+                'bin/python',
+            )
+        for check in checks:
+            guess = os.path.join(sys.exec_prefix, check)
+            if os.path.isfile(guess):
+                # Bingo - We think we have our Python.
+                return Environment(guess)
+        # It looks like there is no reasonable Python to be found.
+        return InterpreterEnvironment()
+    # If no virtualenv is found, use the environment we're already
+    # using.
+    return env
+
+
+def get_cached_default_environment():
+    var = os.environ.get('VIRTUAL_ENV')
+    environment = _get_cached_default_environment()
+    if var and var != environment.path:
+        _get_cached_default_environment.clear_cache()
+        return _get_cached_default_environment()
+    return environment


@time_cache(seconds=10 * 60)  # 10 Minutes
-def get_cached_default_environment():
+def _get_cached_default_environment():
    return get_default_environment()


@@ -222,7 +282,7 @@ def find_virtualenvs(paths=None, **kwargs):

        try:
            executable = _get_executable_path(path, safe=safe)
-            yield Environment(path, executable)
+            yield Environment(executable)
        except InvalidPythonEnvironment:
            pass

@@ -246,23 +306,6 @@ def find_system_environments():
        pass


-# TODO: the logic to find the Python prefix is much more complicated than that.
-# See Modules/getpath.c for UNIX and PC/getpathp.c for Windows in CPython's
-# source code. A solution would be to deduce it by running the Python
-# interpreter and printing the value of sys.prefix.
-def _get_python_prefix(executable):
-    if os.name != 'nt':
-        return os.path.dirname(os.path.dirname(executable))
-    landmark = os.path.join('Lib', 'os.py')
-    prefix = os.path.dirname(executable)
-    while prefix:
-        if os.path.join(prefix, landmark):
-            return prefix
-        prefix = os.path.dirname(prefix)
-    raise InvalidPythonEnvironment(
-        "Cannot find prefix of executable %s." % executable)
-
-
# TODO: this function should probably return a list of environments since
# multiple Python installations can be found on a system for the same version.
def get_system_environment(version):
@@ -273,26 +316,30 @@ def get_system_environment(version):
    :raises: :exc:`.InvalidPythonEnvironment`
    :returns: :class:`Environment`
    """
-    exe = find_executable('python' + version)
+    exe = which('python' + version)
    if exe:
        if exe == sys.executable:
            return SameEnvironment()
-        return Environment(_get_python_prefix(exe), exe)
+        return Environment(exe)

    if os.name == 'nt':
-        for prefix, exe in _get_executables_from_windows_registry(version):
-            return Environment(prefix, exe)
+        for exe in _get_executables_from_windows_registry(version):
+            return Environment(exe)
    raise InvalidPythonEnvironment("Cannot find executable python%s." % version)


def create_environment(path, safe=True):
    """
-    Make it possible to create an environment by hand.
+    Make it possible to manually create an Environment object by specifying a
+    Virtualenv path or an executable path.

    :raises: :exc:`.InvalidPythonEnvironment`
    :returns: :class:`Environment`
    """
-    return Environment(path, _get_executable_path(path, safe=safe))
+    if os.path.isfile(path):
+        _assert_safe(path, safe)
+        return Environment(path)
+    return Environment(_get_executable_path(path, safe=safe))


def _get_executable_path(path, safe=True):
@@ -307,24 +354,23 @@ def _get_executable_path(path, safe=True):
    if not os.path.exists(python):
        raise InvalidPythonEnvironment("%s seems to be missing." % python)

-    if safe and not _is_safe(python):
-        raise InvalidPythonEnvironment("The python binary is potentially unsafe.")
+    _assert_safe(python, safe)
    return python


def _get_executables_from_windows_registry(version):
    # The winreg module is named _winreg on Python 2.
    try:
        import winreg
    except ImportError:
        import _winreg as winreg

    # TODO: support Python Anaconda.
    sub_keys = [
        r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
        r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
    ]
    for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
        for sub_key in sub_keys:
@@ -334,11 +380,17 @@ def _get_executables_from_windows_registry(version):
                prefix = winreg.QueryValueEx(key, '')[0]
                exe = os.path.join(prefix, 'python.exe')
                if os.path.isfile(exe):
-                    yield prefix, exe
+                    yield exe
            except WindowsError:
                pass


+def _assert_safe(executable_path, safe):
+    if safe and not _is_safe(executable_path):
+        raise InvalidPythonEnvironment(
+            "The python binary is potentially unsafe.")
+
+
def _is_safe(executable_path):
    # Resolve sym links. A venv typically is a symlink to a known Python
    # binary. Only virtualenvs copy symlinks around.
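The hunks above rebuild ``Environment`` so that it is constructed from an executable alone and fills in ``executable``, ``path`` and ``version_info`` by querying the actual interpreter. A minimal usage sketch of that public API, using only names shown in the diff (``get_default_environment``, ``create_environment``); the concrete executable path is a placeholder:

# Sketch of the reworked environment API; names come from the diff above,
# the executable path is only an example.
from jedi.api.environment import get_default_environment, create_environment

env = get_default_environment()      # SameEnvironment, or the $VIRTUAL_ENV one
print(env.executable, env.path)      # mirror sys.executable / sys.prefix
print(tuple(env.version_info))       # e.g. (3, 6, 5)

# An Environment can now also be created straight from an executable path.
other = create_environment('/usr/bin/python3', safe=True)
print(other.version_info.major)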
@@ -12,7 +12,6 @@ from jedi._compatibility import u
from jedi.evaluate.syntax_tree import eval_atom
from jedi.evaluate.helpers import evaluate_call_of_leaf
from jedi.evaluate.compiled import get_string_context_set
-from jedi.evaluate.base_context import ContextSet
from jedi.cache import call_signature_time_cache


@@ -106,14 +105,17 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
        # TODO This is for now not an official parso API that exists purely
        # for Jedi.
        tokens = grammar._tokenize(code)
-        for token_ in tokens:
-            if token_.string == safeword:
+        for token in tokens:
+            if token.string == safeword:
                raise EndMarkerReached()
-            elif token_.prefix.endswith(safeword):
+            elif token.prefix.endswith(safeword):
                # This happens with comments.
                raise EndMarkerReached()
+            elif token.string.endswith(safeword):
+                yield token  # Probably an f-string literal that was not finished.
+                raise EndMarkerReached()
            else:
-                yield token_
+                yield token

    # The code might be indedented, just remove it.
    code = dedent(_get_code_for_stack(code_lines, module_node, pos))
@@ -127,59 +129,11 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
    try:
        p.parse(tokens=tokenize_without_endmarker(code))
    except EndMarkerReached:
-        return Stack(p.pgen_parser.stack)
-    raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
-
-
-class Stack(list):
-    def get_node_names(self, grammar):
-        for dfa, state, (node_number, nodes) in self:
-            yield grammar.number2symbol[node_number]
-
-    def get_nodes(self):
-        for dfa, state, (node_number, nodes) in self:
-            for node in nodes:
-                yield node
-
-
-def get_possible_completion_types(pgen_grammar, stack):
-    def add_results(label_index):
-        try:
-            grammar_labels.append(inversed_tokens[label_index])
-        except KeyError:
-            try:
-                keywords.append(inversed_keywords[label_index])
-            except KeyError:
-                t, v = pgen_grammar.labels[label_index]
-                assert t >= 256
-                # See if it's a symbol and if we're in its first set
-                inversed_keywords
-                itsdfa = pgen_grammar.dfas[t]
-                itsstates, itsfirst = itsdfa
-                for first_label_index in itsfirst.keys():
-                    add_results(first_label_index)
-
-    inversed_keywords = dict((v, k) for k, v in pgen_grammar.keywords.items())
-    inversed_tokens = dict((v, k) for k, v in pgen_grammar.tokens.items())
-
-    keywords = []
-    grammar_labels = []
-
-    def scan_stack(index):
-        dfa, state, node = stack[index]
-        states, first = dfa
-        arcs = states[state]
-
-        for label_index, new_state in arcs:
-            if label_index == 0:
-                # An accepting state, check the stack below.
-                scan_stack(index - 1)
-            else:
-                add_results(label_index)
-
-    scan_stack(-1)
-
-    return keywords, grammar_labels
+        return p.stack
+    raise SystemError(
+        "This really shouldn't happen. There's a bug in Jedi:\n%s"
+        % list(tokenize_without_endmarker(code))
+    )


def evaluate_goto_definition(evaluator, context, leaf):
@@ -218,7 +172,8 @@ def _get_index_and_key(nodes, position):

    if nodes_before:
        last = nodes_before[-1]
-        if last.type == 'argument' and last.children[1].end_pos <= position:
+        if last.type == 'argument' and last.children[1] == '=' \
+                and last.children[1].end_pos <= position:
            # Checked if the argument
            key_str = last.children[0].value
        elif last == '=':
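The completion hunks above keep the user's incomplete code parsable by appending a sentinel ("safeword") and aborting tokenization once it shows up, even when the sentinel ends up inside a comment or an unfinished f-string literal. A toy sketch of the same idea using only the stdlib tokenizer (the real code feeds parso's tokenizer and parser instead, and the sentinel name here is made up):

# Toy illustration, not the Jedi implementation: stop tokenizing at a sentinel.
import io
import tokenize

SAFEWORD = 'ZZZ_COMPLETION_SENTINEL'

def tokens_until_safeword(code):
    source = code + SAFEWORD
    for token in tokenize.generate_tokens(io.StringIO(source).readline):
        if SAFEWORD in token.string:
            return  # sentinel reached - everything before it was real user code
        yield token

for tok in tokens_until_safeword("foo(bar, "):
    print(tokenize.tok_name[tok.type], repr(tok.string))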
@@ -1,7 +1,7 @@
import os
import json

-from jedi._compatibility import FileNotFoundError, NotADirectoryError
+from jedi._compatibility import FileNotFoundError, NotADirectoryError, PermissionError
from jedi.api.environment import SameEnvironment, \
    get_cached_default_environment
from jedi.api.exceptions import WrongVersion
@@ -85,7 +85,7 @@ class Project(object):
        if environment is None:
            environment = self.get_environment()

-        sys_path = environment.get_sys_path()
+        sys_path = list(environment.get_sys_path())
        try:
            sys_path.remove('')
        except ValueError:
@@ -108,12 +108,7 @@ class Project(object):
            if evaluator.script_path is not None:
                suffixed += discover_buildout_paths(evaluator, evaluator.script_path)

-                traversed = []
-                for parent in traverse_parents(evaluator.script_path):
-                    traversed.append(parent)
-                    if parent == self._path:
-                        # Don't go futher than the project path.
-                        break
+                traversed = list(traverse_parents(evaluator.script_path))

                # AFAIK some libraries have imports like `foo.foo.bar`, which
                # leads to the conclusion to by default prefer longer paths
@@ -156,7 +151,7 @@ def _is_django_path(directory):
    try:
        with open(os.path.join(directory, 'manage.py'), 'rb') as f:
            return b"DJANGO_SETTINGS_MODULE" in f.read()
-    except (FileNotFoundError, NotADirectoryError):
+    except (FileNotFoundError, NotADirectoryError, PermissionError):
        return False

    return False
@@ -172,7 +167,7 @@ def get_default_project(path=None):
    for dir in traverse_parents(check, include_current=True):
        try:
            return Project.load(dir)
-        except (FileNotFoundError, NotADirectoryError):
+        except (FileNotFoundError, NotADirectoryError, PermissionError):
            pass

    if first_no_init_file is None:
@@ -1,6 +1,8 @@
"""
To use Jedi completion in Python interpreter, add the following in your shell
-setup (e.g., ``.bashrc``)::
+setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
+not available on Windows. If you still want Jedi autocompletion in your REPL,
+just use IPython instead::

    export PYTHONSTARTUP="$(python -m jedi repl)"

@@ -126,6 +126,7 @@ def time_cache(seconds):

        wrapper.clear_cache = lambda: cache.clear()
        return wrapper

    return decorator

@@ -1,4 +1,5 @@
import os
+from contextlib import contextmanager


def traverse_parents(path, include_current=False):
@@ -10,3 +11,16 @@ def traverse_parents(path, include_current=False):
        yield path
        previous = path
        path = os.path.dirname(path)
+
+
+@contextmanager
+def monkeypatch(obj, attribute_name, new_value):
+    """
+    Like pytest's monkeypatch, but as a context manager.
+    """
+    old_value = getattr(obj, attribute_name)
+    try:
+        setattr(obj, attribute_name, new_value)
+        yield
+    finally:
+        setattr(obj, attribute_name, old_value)
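The ``monkeypatch`` helper added above temporarily replaces an attribute and restores it even if the block raises. A short usage sketch; the import path and the patched setting are assumptions based on this diff, not taken from it verbatim:

# Example use of the monkeypatch context manager from the hunk above.
from jedi.common.utils import monkeypatch  # module path assumed
from jedi import settings

with monkeypatch(settings, 'case_insensitive_completion', False):
    # Inside the block the attribute holds the temporary value.
    assert settings.case_insensitive_completion is False
# On exit the original value is restored, even when an exception was raised.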
@@ -2,16 +2,17 @@ from jedi._compatibility import encoding, is_py3, u
import os
import time

+_inited = False


def _lazy_colorama_init():
    """
-    Lazily init colorama if necessary, not to screw up stdout is debug not
-    enabled.
+    Lazily init colorama if necessary, not to screw up stdout if debugging is
+    not enabled.

    This version of the function does nothing.
    """
-    pass
-
-_inited=False

try:
    if os.name == 'nt':
@@ -21,7 +22,8 @@ try:
        # Use colorama for nicer console output.
        from colorama import Fore, init
        from colorama import initialise
-        def _lazy_colorama_init():
+
+        def _lazy_colorama_init():  # noqa: F811
            """
            Lazily init colorama if necessary, not to screw up stdout is
            debug not enabled.
@@ -105,6 +105,9 @@ class Evaluator(object):
        self.is_analysis = False
        self.project = project
        self.access_cache = {}
+        # This setting is only temporary to limit the work we have to do with
+        # tensorflow and others.
+        self.infer_enabled = True

        self.reset_recursion_limitations()
        self.allow_different_encoding = True
@@ -123,6 +126,9 @@ class Evaluator(object):
        return self.project._get_sys_path(self, environment=self.environment)

    def eval_element(self, context, element):
+        if not self.infer_enabled:
+            return NO_CONTEXTS
+
        if isinstance(context, CompForContext):
            return eval_node(context, element)

@@ -216,7 +222,7 @@ class Evaluator(object):
        if type_ == 'classdef':
            return [ClassContext(self, context, name.parent)]
        elif type_ == 'funcdef':
-            return [FunctionContext(self, context, name.parent)]
+            return [FunctionContext.from_context(context, name.parent)]

        if type_ == 'expr_stmt':
            is_simple_name = name.parent.type not in ('power', 'trailer')
@@ -334,16 +340,15 @@ class Evaluator(object):
            parent_context = from_scope_node(parent_scope, child_is_funcdef=is_funcdef)

            if is_funcdef:
+                func = FunctionContext.from_context(
+                    parent_context,
+                    scope_node
+                )
                if isinstance(parent_context, AnonymousInstance):
                    func = BoundMethod(
-                        self, parent_context, parent_context.class_context,
-                        parent_context.parent_context, scope_node
-                    )
-                else:
-                    func = FunctionContext(
-                        self,
-                        parent_context,
-                        scope_node
+                        instance=parent_context,
+                        klass=parent_context.class_context,
+                        function=func
                    )
                if is_nested and not node_is_object:
                    return func.get_function_execution()
@@ -373,12 +378,12 @@ class Evaluator(object):
            scope_node = parent_scope(node)
            return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object)

-    def parse_and_get_code(self, code=None, path=None, **kwargs):
+    def parse_and_get_code(self, code=None, path=None, encoding='utf-8', **kwargs):
        if self.allow_different_encoding:
            if code is None:
                with open(path, 'rb') as f:
                    code = f.read()
-            code = python_bytes_to_unicode(code, errors='replace')
+            code = python_bytes_to_unicode(code, encoding=encoding, errors='replace')

        return self.grammar.parse(code=code, path=path, **kwargs), code

@@ -1,3 +1,5 @@
+import re
+
from parso.python import tree

from jedi._compatibility import zip_longest
@@ -8,7 +10,7 @@ from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
from jedi.evaluate.filters import ParamName
from jedi.evaluate.base_context import NO_CONTEXTS
from jedi.evaluate.context import iterable
-from jedi.evaluate.param import get_params, ExecutedParam
+from jedi.evaluate.param import get_executed_params, ExecutedParam


def try_iter_content(types, depth=0):
@@ -28,32 +30,83 @@ def try_iter_content(types, depth=0):
                try_iter_content(lazy_context.infer(), depth + 1)


+def repack_with_argument_clinic(string, keep_arguments_param=False):
+    """
+    Transforms a function or method with arguments to the signature that is
+    given as an argument clinic notation.
+
+    Argument clinic is part of CPython and used for all the functions that are
+    implemented in C (Python 3.7):
+
+        str.split.__text_signature__
+        # Results in: '($self, /, sep=None, maxsplit=-1)'
+    """
+    clinic_args = list(_parse_argument_clinic(string))
+
+    def decorator(func):
+        def wrapper(*args, **kwargs):
+            if keep_arguments_param:
+                arguments = kwargs['arguments']
+            else:
+                arguments = kwargs.pop('arguments')
+            try:
+                args += tuple(_iterate_argument_clinic(arguments, clinic_args))
+            except ValueError:
+                return NO_CONTEXTS
+            else:
+                return func(*args, **kwargs)
+
+        return wrapper
+    return decorator
+
+
+def _iterate_argument_clinic(arguments, parameters):
+    """Uses a list with argument clinic information (see PEP 436)."""
+    iterator = arguments.unpack()
+    for i, (name, optional, allow_kwargs) in enumerate(parameters):
+        key, argument = next(iterator, (None, None))
+        if key is not None:
+            debug.warning('Keyword arguments in argument clinic are currently not supported.')
+            raise ValueError
+        if argument is None and not optional:
+            debug.warning('TypeError: %s expected at least %s arguments, got %s',
+                          name, len(parameters), i)
+            raise ValueError
+
+        context_set = NO_CONTEXTS if argument is None else argument.infer()
+
+        if not context_set and not optional:
+            # For the stdlib we always want values. If we don't get them,
+            # that's ok, maybe something is too hard to resolve, however,
+            # we will not proceed with the evaluation of that function.
+            debug.warning('argument_clinic "%s" not resolvable.', name)
+            raise ValueError
+        yield context_set
+
+
+def _parse_argument_clinic(string):
+    allow_kwargs = False
+    optional = False
+    while string:
+        # Optional arguments have to begin with a bracket. And should always be
+        # at the end of the arguments. This is therefore not a proper argument
+        # clinic implementation. `range()` for exmple allows an optional start
+        # value at the beginning.
+        match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
+        string = string[len(match.group(0)):]
+        if not match.group(2):  # A slash -> allow named arguments
+            allow_kwargs = True
+            continue
+        optional = optional or bool(match.group(1))
+        word = match.group(2)
+        yield (word, optional, allow_kwargs)
+
+
class AbstractArguments(object):
    context = None
    argument_node = None
    trailer = None

-    def eval_argument_clinic(self, parameters):
-        """Uses a list with argument clinic information (see PEP 436)."""
-        iterator = self.unpack()
-        for i, (name, optional, allow_kwargs) in enumerate(parameters):
-            key, argument = next(iterator, (None, None))
-            if key is not None:
-                raise NotImplementedError
-            if argument is None and not optional:
-                debug.warning('TypeError: %s expected at least %s arguments, got %s',
-                              name, len(parameters), i)
-                raise ValueError
-            values = NO_CONTEXTS if argument is None else argument.infer()
-
-            if not values and not optional:
-                # For the stdlib we always want values. If we don't get them,
-                # that's ok, maybe something is too hard to resolve, however,
-                # we will not proceed with the evaluation of that function.
-                debug.warning('argument_clinic "%s" not resolvable.', name)
-                raise ValueError
-            yield values
-
    def eval_all(self, funcdef=None):
        """
        Evaluates all arguments as a support for static analysis
@@ -64,17 +117,17 @@ class AbstractArguments(object):
        try_iter_content(types)

    def get_calling_nodes(self):
-        raise NotImplementedError
+        return []

    def unpack(self, funcdef=None):
        raise NotImplementedError

-    def get_params(self, execution_context):
-        return get_params(execution_context, self)
+    def get_executed_params(self, execution_context):
+        return get_executed_params(execution_context, self)


class AnonymousArguments(AbstractArguments):
-    def get_params(self, execution_context):
+    def get_executed_params(self, execution_context):
        from jedi.evaluate.dynamic import search_params
        return search_params(
            execution_context.evaluator,
@@ -82,6 +135,9 @@ class AnonymousArguments(AbstractArguments):
            execution_context.tree_node
        )

+    def __repr__(self):
+        return '%s()' % self.__class__.__name__
+

class TreeArguments(AbstractArguments):
    def __init__(self, evaluator, context, argument_node, trailer=None):
@@ -171,7 +227,7 @@ class TreeArguments(AbstractArguments):
        return '<%s: %s>' % (self.__class__.__name__, self.argument_node)

    def get_calling_nodes(self):
-        from jedi.evaluate.dynamic import MergedExecutedParams
+        from jedi.evaluate.dynamic import DynamicExecutedParams
        old_arguments_list = []
        arguments = self

@@ -190,7 +246,7 @@ class TreeArguments(AbstractArguments):
            if not isinstance(names[0], ParamName):
                break
            param = names[0].get_param()
-            if isinstance(param, MergedExecutedParams):
+            if isinstance(param, DynamicExecutedParams):
                # For dynamic searches we don't even want to see errors.
                return []
            if not isinstance(param, ExecutedParam):
@@ -215,9 +271,6 @@ class ValuesArguments(AbstractArguments):
        for values in self._values_list:
            yield None, LazyKnownContexts(values)

-    def get_calling_nodes(self):
-        return []
-
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._values_list)
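The argument clinic notation handled above is a compact signature description: ``[`` opens the optional tail and ``, /`` ends the positional-only part. A worked reading of ``_parse_argument_clinic`` on one such string; this is my interpretation of the regex in the hunk above, not output captured from the diff:

# Worked example (assumed behaviour) of the parser added in the hunk above.
triples = list(_parse_argument_clinic('iterator[, default], /'))
# Expected: [('iterator', False, False), ('default', True, False)]
# i.e. `iterator` is required, `default` is optional, and the trailing ', /'
# only flips the allow_kwargs flag for anything that would follow it.
print(triples)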
@@ -12,6 +12,8 @@ from jedi import debug
from jedi._compatibility import Python3Method, zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
from jedi.common import BaseContextSet, BaseContext
+from jedi.evaluate.helpers import EvaluatorIndexError, EvaluatorTypeError, \
+    EvaluatorKeyError


class Context(BaseContext):
@@ -128,11 +130,15 @@ class Context(BaseContext):
            else:
                try:
                    result |= getitem(index)
-                except IndexError:
+                except EvaluatorIndexError:
                    result |= iterate_contexts(ContextSet(self))
-                except KeyError:
+                except EvaluatorKeyError:
                    # Must be a dict. Lists don't raise KeyErrors.
                    result |= self.dict_values()
+                except EvaluatorTypeError:
+                    # The type is wrong and therefore it makes no sense to do
+                    # anything anymore.
+                    result = NO_CONTEXTS
        return result

    def eval_node(self, node):
@@ -193,9 +199,10 @@ def iterate_contexts(contexts, contextualized_node=None, is_async=False):


class TreeContext(Context):
-    def __init__(self, evaluator, parent_context=None):
+    def __init__(self, evaluator, parent_context, tree_node):
        super(TreeContext, self).__init__(evaluator, parent_context)
        self.predefined_names = {}
+        self.tree_node = tree_node

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
@@ -32,8 +32,12 @@ def get_string_context_set(evaluator):
    return builtin_from_name(evaluator, u'str').execute_evaluated()


-def load_module(evaluator, **kwargs):
-    access_path = evaluator.compiled_subprocess.load_module(**kwargs)
+def load_module(evaluator, dotted_name, **kwargs):
+    # Temporary, some tensorflow builtins cannot be loaded, so it's tried again
+    # and again and it's really slow.
+    if dotted_name.startswith('tensorflow.'):
+        return None
+    access_path = evaluator.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
    if access_path is None:
        return None
    return create_from_access_path(evaluator, access_path)
@@ -5,11 +5,9 @@ from textwrap import dedent
import operator as op
from collections import namedtuple

-from jedi import debug
-from jedi._compatibility import unicode, is_py3, is_py34, builtins, \
+from jedi._compatibility import unicode, is_py3, builtins, \
    py_version, force_unicode, print_to_stderr
from jedi.evaluate.compiled.getattr_static import getattr_static
-from jedi.evaluate.utils import dotted_from_fs_path


MethodDescriptorType = type(str.replace)
@@ -33,10 +31,9 @@ NOT_CLASS_TYPES = (
if is_py3:
    NOT_CLASS_TYPES += (
        types.MappingProxyType,
-        types.SimpleNamespace
+        types.SimpleNamespace,
+        types.DynamicClassAttribute,
    )
-if is_py34:
-    NOT_CLASS_TYPES += (types.DynamicClassAttribute,)


# Those types don't exist in typing.
@@ -137,20 +134,13 @@ def create_access(evaluator, obj):
    return evaluator.compiled_subprocess.get_or_create_access_handle(obj)


-def load_module(evaluator, path=None, name=None, sys_path=None):
-    if sys_path is None:
-        sys_path = list(evaluator.get_sys_path())
-    if path is not None:
-        dotted_path = dotted_from_fs_path(path, sys_path=sys_path)
-    else:
-        dotted_path = name
-
+def load_module(evaluator, dotted_name, sys_path):
    temp, sys.path = sys.path, sys_path
    try:
-        __import__(dotted_path)
+        __import__(dotted_name)
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
-        debug.warning('Module %s not importable in path %s.', dotted_path, path)
+        print_to_stderr('Module %s not importable in path %s.' % (dotted_name, sys_path))
        return None
    except Exception:
        # Since __import__ pretty much makes code execution possible, just
@@ -163,7 +153,7 @@ def load_module(evaluator, path=None, name=None, sys_path=None):

    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
-    module = sys.modules[dotted_path]
+    module = sys.modules[dotted_name]
    return create_access_path(evaluator, module)


@@ -262,6 +252,9 @@ class DirectObjectAccess(object):
    def py__bases__(self):
        return [self._create_access_path(base) for base in self._obj.__bases__]

+    def py__path__(self):
+        return self._obj.__path__
+
    @_force_unicode_decorator
    def get_repr(self):
        builtins = 'builtins', '__builtin__'
@@ -476,7 +469,7 @@ else:

class _SPECIAL_OBJECTS(object):
    FUNCTION_CLASS = types.FunctionType
-    METHOD_CLASS = type(DirectObjectAccess.py__bool__)
+    BOUND_METHOD_CLASS = type(DirectObjectAccess(None, None).py__bool__)
    MODULE_CLASS = types.ModuleType
    GENERATOR_OBJECT = _a_generator(1.0)
    BUILTINS = builtins
@@ -13,6 +13,7 @@ from jedi.evaluate.base_context import Context, ContextSet
from jedi.evaluate.lazy_context import LazyKnownContext
from jedi.evaluate.compiled.access import _sentinel
from jedi.evaluate.cache import evaluator_function_cache
+from jedi.evaluate.helpers import reraise_as_evaluator
from . import fake


@@ -54,7 +55,7 @@ class CompiledObject(Context):
            return FunctionContext(
                self.evaluator,
                parent_context=self.parent_context,
-                funcdef=self.tree_node
+                tree_node=self.tree_node
            ).py__call__(params)
        if self.access_handle.is_class():
            from jedi.evaluate.context import CompiledInstance
@@ -80,6 +81,10 @@ class CompiledObject(Context):
            for access in self.access_handle.py__bases__()
        )

+    @CheckAttribute
+    def py__path__(self):
+        return self.access_handle.py__path__()
+
    def py__bool__(self):
        return self.access_handle.py__bool__()

@@ -145,7 +150,8 @@ class CompiledObject(Context):

    @CheckAttribute
    def py__getitem__(self, index):
-        access = self.access_handle.py__getitem__(index)
+        with reraise_as_evaluator(IndexError, KeyError, TypeError):
+            access = self.access_handle.py__getitem__(index)
        if access is None:
            return ContextSet()

@@ -441,8 +447,11 @@ def create_from_name(evaluator, compiled_object, name):
        pass

    access = compiled_object.access_handle.getattr(name, default=None)
+    parent_context = compiled_object
+    if parent_context.is_class():
+        parent_context = parent_context.parent_context
    return create_cached_compiled_object(
-        evaluator, access, parent_context=compiled_object, faked=faked
+        evaluator, access, parent_context=parent_context, faked=faked
    )

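``reraise_as_evaluator``, imported above from ``jedi.evaluate.helpers``, wraps exceptions coming from the inspected object so they cannot be mistaken for Jedi's own errors; the ``base_context`` hunk earlier then catches ``EvaluatorIndexError``/``EvaluatorKeyError``/``EvaluatorTypeError`` instead of the bare builtins. The helper itself is not part of this diff, so the following is only a plausible sketch under that assumption:

# Hedged sketch of a reraise_as_evaluator helper; only the names are from the diff.
from contextlib import contextmanager

class EvaluatorIndexError(Exception):
    pass

class EvaluatorKeyError(Exception):
    pass

class EvaluatorTypeError(Exception):
    pass

_WRAPPERS = {IndexError: EvaluatorIndexError,
             KeyError: EvaluatorKeyError,
             TypeError: EvaluatorTypeError}

@contextmanager
def reraise_as_evaluator(*exception_classes):
    try:
        yield
    except exception_classes as e:
        # Re-raise as the Jedi-internal equivalent, so callers can tell errors
        # of the inspected object apart from bugs in Jedi itself.
        wrapper = next(w for cls, w in _WRAPPERS.items() if isinstance(e, cls))
        raise wrapper(e)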
@@ -107,7 +107,7 @@ def _load_module(evaluator, path):
    module_node = evaluator.grammar.parse(
        path=path,
        cache=True,
-        diff_cache=True,
+        diff_cache=settings.fast_parser,
        cache_path=settings.cache_directory
    ).get_root_node()
    # python_module = inspect.getmodule(python_object)
@@ -166,11 +166,10 @@ def _find_syntax_node_name(evaluator, access_handle):
        return None  # It's too hard to find lambdas.

    # Doesn't always work (e.g. os.stat_result)
-    try:
-        names = module_node.get_used_names()[name_str]
-    except KeyError:
-        return None
+    names = module_node.get_used_names().get(name_str, [])
    names = [n for n in names if n.is_definition()]
+    if not names:
+        return None

    try:
        code = python_object.__code__
@@ -15,26 +15,41 @@ import errno
import weakref
import traceback
from functools import partial
+from threading import Thread
+try:
+    from queue import Queue, Empty
+except ImportError:
+    from Queue import Queue, Empty  # python 2.7

from jedi._compatibility import queue, is_py3, force_unicode, \
-    pickle_dump, pickle_load, GeneralizedPopen
+    pickle_dump, pickle_load, GeneralizedPopen, print_to_stderr
+from jedi import debug
from jedi.cache import memoize_method
from jedi.evaluate.compiled.subprocess import functions
from jedi.evaluate.compiled.access import DirectObjectAccess, AccessPath, \
    SignatureParam
from jedi.api.exceptions import InternalError

-_subprocesses = {}

_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')


-def get_subprocess(executable):
-    try:
-        return _subprocesses[executable]
-    except KeyError:
-        sub = _subprocesses[executable] = _CompiledSubprocess(executable)
-        return sub
+def _enqueue_output(out, queue):
+    for line in iter(out.readline, b''):
+        queue.put(line)
+    out.close()
+
+
+def _add_stderr_to_debug(stderr_queue):
+    while True:
+        # Try to do some error reporting from the subprocess and print its
+        # stderr contents.
+        try:
+            line = stderr_queue.get_nowait()
+            line = line.decode('utf-8', 'replace')
+            debug.warning('stderr output: %s' % line.rstrip('\n'))
+        except Empty:
+            break


def _get_function(name):
@@ -118,31 +133,57 @@ class EvaluatorSubprocess(_EvaluatorProcess):
        return obj

    def __del__(self):
-        if self._used:
+        if self._used and not self._compiled_subprocess.is_crashed:
            self._compiled_subprocess.delete_evaluator(self._evaluator_id)


-class _CompiledSubprocess(object):
-    _crashed = False
+class CompiledSubprocess(object):
+    is_crashed = False
+    # Start with 2, gets set after _get_info.
+    _pickle_protocol = 2

    def __init__(self, executable):
        self._executable = executable
        self._evaluator_deletion_queue = queue.deque()

+    def __repr__(self):
+        pid = os.getpid()
+        return '<%s _executable=%r, _pickle_protocol=%r, is_crashed=%r, pid=%r>' % (
+            self.__class__.__name__,
+            self._executable,
+            self._pickle_protocol,
+            self.is_crashed,
+            pid,
+        )
+
    @property
    @memoize_method
    def _process(self):
+        debug.dbg('Start environment subprocess %s', self._executable)
        parso_path = sys.modules['parso'].__file__
        args = (
            self._executable,
            _MAIN_PATH,
-            os.path.dirname(os.path.dirname(parso_path))
+            os.path.dirname(os.path.dirname(parso_path)),
+            '.'.join(str(x) for x in sys.version_info[:3]),
        )
-        return GeneralizedPopen(
+        process = GeneralizedPopen(
            args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            # Use system default buffering on Python 2 to improve performance
+            # (this is already the case on Python 3).
+            bufsize=-1
        )
+        self._stderr_queue = Queue()
+        self._stderr_thread = t = Thread(
+            target=_enqueue_output,
+            args=(process.stderr, self._stderr_queue)
+        )
+        t.daemon = True
+        t.start()
+        return process

    def run(self, evaluator, function, args=(), kwargs={}):
        # Delete old evaluators.
@@ -160,24 +201,23 @@ class _CompiledSubprocess(object):
    def get_sys_path(self):
        return self._send(None, functions.get_sys_path, (), {})

-    def kill(self):
-        self._crashed = True
+    def _kill(self):
+        self.is_crashed = True
        try:
-            subprocess = _subprocesses[self._executable]
-        except KeyError:
-            # Fine it was already removed from the cache.
+            self._process.kill()
+            self._process.wait()
+        except (AttributeError, TypeError):
+            # If the Python process is terminating, it will remove some modules
+            # earlier than others and in general it's unclear how to deal with
+            # that so we just ignore the exceptions here.
            pass
-        else:
-            # In the `!=` case there is already a new subprocess in place
-            # and we don't need to do anything here anymore.
-            if subprocess == self:
-                del _subprocesses[self._executable]

-        self._process.kill()
-        self._process.wait()
+    def __del__(self):
+        if not self.is_crashed:
+            self._kill()

    def _send(self, evaluator_id, function, args=(), kwargs={}):
-        if self._crashed:
+        if self.is_crashed:
            raise InternalError("The subprocess %s has crashed." % self._executable)

        if not is_py3:
@@ -186,7 +226,7 @@ class _CompiledSubprocess(object):

        data = evaluator_id, function, args, kwargs
        try:
-            pickle_dump(data, self._process.stdin)
+            pickle_dump(data, self._process.stdin, self._pickle_protocol)
        except (socket.error, IOError) as e:
            # Once Python2 will be removed we can just use `BrokenPipeError`.
            # Also, somehow in windows it returns EINVAL instead of EPIPE if
@@ -194,15 +234,27 @@ class _CompiledSubprocess(object):
            if e.errno not in (errno.EPIPE, errno.EINVAL):
                # Not a broken pipe
                raise
-            self.kill()
+            self._kill()
            raise InternalError("The subprocess %s was killed. Maybe out of memory?"
                                % self._executable)

        try:
            is_exception, traceback, result = pickle_load(self._process.stdout)
-        except EOFError:
-            self.kill()
-            raise InternalError("The subprocess %s has crashed." % self._executable)
+        except EOFError as eof_error:
+            try:
+                stderr = self._process.stderr.read().decode('utf-8', 'replace')
+            except Exception as exc:
+                stderr = '<empty/not available (%r)>' % exc
+            self._kill()
+            _add_stderr_to_debug(self._stderr_queue)
+            raise InternalError(
+                "The subprocess %s has crashed (%r, stderr=%s)." % (
+                    self._executable,
+                    eof_error,
+                    stderr,
+                ))
+
+        _add_stderr_to_debug(self._stderr_queue)

        if is_exception:
            # Replace the attribute error message with a the traceback. It's
@@ -223,11 +275,12 @@ class _CompiledSubprocess(object):


class Listener(object):
-    def __init__(self):
+    def __init__(self, pickle_protocol):
        self._evaluators = {}
        # TODO refactor so we don't need to process anymore just handle
        # controlling.
        self._process = _EvaluatorProcess(Listener)
+        self._pickle_protocol = pickle_protocol

    def _get_evaluator(self, function, evaluator_id):
        from jedi.evaluate import Evaluator
@@ -266,29 +319,33 @@ class Listener(object):

    def listen(self):
        stdout = sys.stdout
-        # Mute stdout/stderr. Nobody should actually be able to write to those,
-        # because stdout is used for IPC and stderr will just be annoying if it
-        # leaks (on module imports).
+        # Mute stdout. Nobody should actually be able to write to it,
+        # because stdout is used for IPC.
        sys.stdout = open(os.devnull, 'w')
-        sys.stderr = open(os.devnull, 'w')
        stdin = sys.stdin
        if sys.version_info[0] > 2:
            stdout = stdout.buffer
            stdin = stdin.buffer
+        # Python 2 opens streams in text mode on Windows. Set stdout and stdin
+        # to binary mode.
+        elif sys.platform == 'win32':
+            import msvcrt
+            msvcrt.setmode(stdout.fileno(), os.O_BINARY)
+            msvcrt.setmode(stdin.fileno(), os.O_BINARY)

        while True:
            try:
                payload = pickle_load(stdin)
            except EOFError:
-                # It looks like the parent process closed. Don't make a big fuss
-                # here and just exit.
-                exit(1)
+                # It looks like the parent process closed.
+                # Don't make a big fuss here and just exit.
+                exit(0)
            try:
                result = False, None, self._run(*payload)
            except Exception as e:
                result = True, traceback.format_exc(), e

-            pickle_dump(result, file=stdout)
+            pickle_dump(result, stdout, self._pickle_protocol)


class AccessHandle(object):
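Parent and child now agree on a pickle protocol both interpreters understand: the parent side starts with ``_pickle_protocol = 2`` and adjusts it after ``_get_info``, while the child computes it from ``sys.argv[2]`` in ``__main__.py`` below. A hedged sketch of what ``highest_pickle_protocol`` in ``jedi._compatibility`` presumably does; that helper is not part of this diff, so the exact table may differ:

# Hedged sketch: choose the highest pickle protocol all version tuples support.
# Protocol 2 is the Python 2 ceiling; protocol 4 arrived with Python 3.4.
def highest_pickle_protocol(version_infos):
    protocol = 4
    for version in version_infos:
        major, minor = version[0], version[1]
        if major == 2:
            protocol = min(protocol, 2)
        elif major == 3 and minor < 4:
            protocol = min(protocol, 3)
    return protocol

# A Python 3.6 host driving a Python 2.7 environment would settle on protocol 2.
assert highest_pickle_protocol([(3, 6, 5), (2, 7, 15)]) == 2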
@@ -1,5 +1,5 @@
-import sys
 import os
+import sys


 def _get_paths():

@@ -45,5 +45,11 @@ else:
     load('jedi')
 from jedi.evaluate.compiled import subprocess  # NOQA

+from jedi._compatibility import highest_pickle_protocol  # noqa: E402
+
+
+# Retrieve the pickle protocol.
+host_sys_version = [int(x) for x in sys.argv[2].split('.')]
+pickle_protocol = highest_pickle_protocol([sys.version_info, host_sys_version])
 # And finally start the client.
-subprocess.Listener().listen()
+subprocess.Listener(pickle_protocol=pickle_protocol).listen()
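`highest_pickle_protocol` is imported from `jedi/_compatibility.py`; the sketch below only illustrates the negotiation idea — pick the largest protocol every participating interpreter can read. The `_sketch` function name and its version-to-protocol mapping are assumptions made for the example, not the library's exact table.

```python
def highest_pickle_protocol_sketch(version_infos):
    """Illustrative only: return the highest pickle protocol that every
    interpreter version in `version_infos` can read.

    Assumed mapping for the example: Python 2.x is capped at protocol 2 and
    Python 3.x at protocol 4 (protocol 5 needs 3.8+, outside the range this
    old code base targets).
    """
    def max_protocol(version):
        major = version[0]
        if major <= 2:
            return 2
        return 4

    return min(max_protocol(v) for v in version_infos)


# The child process knows its own sys.version_info and is told the host's
# version on the command line, so both ends compute the same number:
print(highest_pickle_protocol_sketch([(3, 6, 4), (2, 7, 14)]))  # -> 2
print(highest_pickle_protocol_sketch([(3, 6, 4), (3, 4, 8)]))   # -> 4
```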
@@ -69,7 +69,7 @@ def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs):

 def list_module_names(evaluator, search_path):
     return [
-        name
+        force_unicode(name)
         for module_loader, name, is_pkg in iter_modules(search_path)
     ]
@@ -38,17 +38,9 @@ class LambdaName(AbstractNameDefinition):
         return ContextSet(self._lambda_context)


-class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)):
-    """
-    Needed because of decorators. Decorators are evaluated here.
-    """
+class AbstractFunction(TreeContext):
     api_type = u'function'

-    def __init__(self, evaluator, parent_context, funcdef):
-        """ This should not be called directly """
-        super(FunctionContext, self).__init__(evaluator, parent_context)
-        self.tree_node = funcdef
-
     def get_filters(self, search_global, until_position=None, origin_scope=None):
         if search_global:
             yield ParserTreeFilter(

@@ -62,6 +54,24 @@ class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)):
             for filter in scope.get_filters(search_global=False, origin_scope=origin_scope):
                 yield filter

+    def get_param_names(self):
+        function_execution = self.get_function_execution()
+        return [ParamName(function_execution, param.name)
+                for param in self.tree_node.get_params()]
+
+    @property
+    def name(self):
+        if self.tree_node.type == 'lambdef':
+            return LambdaName(self)
+        return ContextName(self, self.tree_node.name)
+
+    def get_function_execution(self, arguments=None):
+        raise NotImplementedError
+
+    def py__call__(self, arguments):
+        function_execution = self.get_function_execution(arguments)
+        return self.infer_function_execution(function_execution)
+
     def infer_function_execution(self, function_execution):
         """
         Created to be used by inheritance.

@@ -84,35 +94,31 @@ class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)):
         else:
             return function_execution.get_return_values()

+    def py__name__(self):
+        return self.name.string_name
+
+
+class FunctionContext(use_metaclass(CachedMetaClass, AbstractFunction)):
+    """
+    Needed because of decorators. Decorators are evaluated here.
+    """
+    @classmethod
+    def from_context(cls, context, tree_node):
+        from jedi.evaluate.context import AbstractInstanceContext
+
+        while context.is_class() or isinstance(context, AbstractInstanceContext):
+            context = context.parent_context
+
+        return cls(context.evaluator, parent_context=context, tree_node=tree_node)
+
     def get_function_execution(self, arguments=None):
         if arguments is None:
             arguments = AnonymousArguments()

         return FunctionExecutionContext(self.evaluator, self.parent_context, self, arguments)

-    def py__call__(self, arguments):
-        function_execution = self.get_function_execution(arguments)
-        return self.infer_function_execution(function_execution)
-
     def py__class__(self):
-        # This differentiation is only necessary for Python2. Python3 does not
-        # use a different method class.
-        if isinstance(parser_utils.get_parent_scope(self.tree_node), tree.Class):
-            name = u'METHOD_CLASS'
-        else:
-            name = u'FUNCTION_CLASS'
-        return compiled.get_special_object(self.evaluator, name)
-
-    @property
-    def name(self):
-        if self.tree_node.type == 'lambdef':
-            return LambdaName(self)
-        return ContextName(self, self.tree_node.name)
-
-    def get_param_names(self):
-        function_execution = self.get_function_execution()
-        return [ParamName(function_execution, param.name)
-                for param in self.tree_node.get_params()]
+        return compiled.get_special_object(self.evaluator, u'FUNCTION_CLASS')


 class FunctionExecutionContext(TreeContext):

@@ -127,9 +133,12 @@ class FunctionExecutionContext(TreeContext):
     function_execution_filter = FunctionExecutionFilter

     def __init__(self, evaluator, parent_context, function_context, var_args):
-        super(FunctionExecutionContext, self).__init__(evaluator, parent_context)
+        super(FunctionExecutionContext, self).__init__(
+            evaluator,
+            parent_context,
+            function_context.tree_node,
+        )
         self.function_context = function_context
-        self.tree_node = function_context.tree_node
         self.var_args = var_args

     @evaluator_method_cache(default=NO_CONTEXTS)

@@ -240,5 +249,5 @@ class FunctionExecutionContext(TreeContext):
                 origin_scope=origin_scope)

     @evaluator_method_cache()
-    def get_params(self):
-        return self.var_args.get_params(self)
+    def get_executed_params(self):
+        return self.var_args.get_executed_params(self)
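To see why this refactor splits the old `FunctionContext` into an abstract base plus concrete subclasses, here is a toy class hierarchy (not jedi code; every name is invented) with the same shape: the base owns the calling protocol, and `get_function_execution` is the single hook a subclass overrides.

```python
class AbstractFunctionSketch(object):
    # Mirrors the pattern from the diff: the base class owns py__call__,
    # subclasses only decide how an "execution" is built.
    def get_function_execution(self, arguments=None):
        raise NotImplementedError

    def py__call__(self, arguments):
        execution = self.get_function_execution(arguments)
        return execution.run()


class Execution(object):
    def __init__(self, func, arguments):
        self._func = func
        self._arguments = arguments

    def run(self):
        return self._func(*self._arguments)


class PlainFunction(AbstractFunctionSketch):
    def __init__(self, func):
        self._func = func

    def get_function_execution(self, arguments=None):
        # A bound-method-like subclass could wrap or extend the arguments
        # here without touching py__call__ at all.
        return Execution(self._func, arguments or ())


print(PlainFunction(lambda a, b: a + b).py__call__((1, 2)))  # -> 3
```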
@@ -1,6 +1,7 @@
 from abc import abstractproperty

 from jedi import debug
+from jedi import settings
 from jedi.evaluate import compiled
 from jedi.evaluate import filters
 from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \

@@ -8,34 +9,40 @@ from jedi.evaluate.base_context import Context, NO_CONTEXTS, ContextSet, \
 from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts
 from jedi.evaluate.cache import evaluator_method_cache
 from jedi.evaluate.arguments import AbstractArguments, AnonymousArguments
-from jedi.cache import memoize_method
-from jedi.evaluate.context.function import FunctionExecutionContext, FunctionContext
-from jedi.evaluate.context.klass import ClassContext, apply_py__get__
+from jedi.evaluate.context.function import FunctionExecutionContext, \
+    FunctionContext, AbstractFunction
+from jedi.evaluate.context.klass import ClassContext, apply_py__get__, ClassFilter
 from jedi.evaluate.context import iterable
 from jedi.parser_utils import get_parent_scope


-class BaseInstanceFunctionExecution(FunctionExecutionContext):
-    def __init__(self, instance, *args, **kwargs):
-        self.instance = instance
-        super(BaseInstanceFunctionExecution, self).__init__(
-            instance.evaluator, *args, **kwargs)
+class InstanceExecutedParam(object):
+    def __init__(self, instance):
+        self._instance = instance
+
+    def infer(self):
+        return ContextSet(self._instance)


-class InstanceFunctionExecution(BaseInstanceFunctionExecution):
-    def __init__(self, instance, parent_context, function_context, var_args):
-        var_args = InstanceVarArgs(self, var_args)
-
-        super(InstanceFunctionExecution, self).__init__(
-            instance, parent_context, function_context, var_args)
-
-
-class AnonymousInstanceFunctionExecution(BaseInstanceFunctionExecution):
-    function_execution_filter = filters.AnonymousInstanceFunctionExecutionFilter
-
-    def __init__(self, instance, parent_context, function_context, var_args):
-        super(AnonymousInstanceFunctionExecution, self).__init__(
-            instance, parent_context, function_context, var_args)
+class AnonymousInstanceArguments(AnonymousArguments):
+    def __init__(self, instance):
+        self._instance = instance
+
+    def get_executed_params(self, execution_context):
+        from jedi.evaluate.dynamic import search_params
+        self_param = InstanceExecutedParam(self._instance)
+        tree_params = execution_context.tree_node.get_params()
+        if len(tree_params) == 1:
+            # If the only param is self, we don't need to try to find
+            # executions of this function, we have all the params already.
+            return [self_param]
+        executed_params = list(search_params(
+            execution_context.evaluator,
+            execution_context,
+            execution_context.tree_node
+        ))
+        executed_params[0] = self_param
+        return executed_params


 class AbstractInstanceContext(Context):

@@ -43,7 +50,6 @@ class AbstractInstanceContext(Context):
     This class is used to evaluate instances.
     """
     api_type = u'instance'
-    function_execution_cls = InstanceFunctionExecution

     def __init__(self, evaluator, parent_context, class_context, var_args):
         super(AbstractInstanceContext, self).__init__(evaluator, parent_context)

@@ -63,7 +69,7 @@ class AbstractInstanceContext(Context):
             raise AttributeError

         def execute(arguments):
-            return ContextSet.from_sets(name.execute(arguments) for name in names)
+            return ContextSet.from_sets(name.infer().execute(arguments) for name in names)

         return execute

@@ -86,7 +92,7 @@ class AbstractInstanceContext(Context):

     def execute_function_slots(self, names, *evaluated_args):
         return ContextSet.from_sets(
-            name.execute_evaluated(*evaluated_args)
+            name.infer().execute_evaluated(*evaluated_args)
             for name in names
         )

@@ -158,21 +164,18 @@ class AbstractInstanceContext(Context):
     def name(self):
         pass

-    def _create_init_execution(self, class_context, func_node):
-        bound_method = BoundMethod(
-            self.evaluator, self, class_context, self.parent_context, func_node
-        )
-        return self.function_execution_cls(
-            self,
-            class_context.parent_context,
-            bound_method,
-            self.var_args
-        )
+    def _create_init_execution(self, class_context, bound_method):
+        return bound_method.get_function_execution(self.var_args)

     def create_init_executions(self):
         for name in self.get_function_slot_names(u'__init__'):
-            if isinstance(name, SelfName):
-                yield self._create_init_execution(name.class_context, name.tree_name.parent)
+            if isinstance(name, LazyInstanceClassName):
+                function = FunctionContext.from_context(
+                    self.parent_context,
+                    name.tree_name.parent
+                )
+                bound_method = BoundMethod(self, name.class_context, function)
+                yield self._create_init_execution(name.class_context, bound_method)

     @evaluator_method_cache()
     def create_instance_context(self, class_context, node):

@@ -184,13 +187,14 @@ class AbstractInstanceContext(Context):
         else:
             parent_context = self.create_instance_context(class_context, scope)
             if scope.type == 'funcdef':
+                func = FunctionContext.from_context(
+                    parent_context,
+                    scope,
+                )
+                bound_method = BoundMethod(self, class_context, func)
                 if scope.name.value == '__init__' and parent_context == class_context:
-                    return self._create_init_execution(class_context, scope)
+                    return self._create_init_execution(class_context, bound_method)
                 else:
-                    bound_method = BoundMethod(
-                        self.evaluator, self, class_context,
-                        parent_context, scope
-                    )
                     return bound_method.get_function_execution()
             elif scope.type == 'classdef':
                 class_context = ClassContext(self.evaluator, parent_context, scope)

@@ -208,16 +212,18 @@ class AbstractInstanceContext(Context):


 class CompiledInstance(AbstractInstanceContext):
-    def __init__(self, *args, **kwargs):
-        super(CompiledInstance, self).__init__(*args, **kwargs)
+    def __init__(self, evaluator, parent_context, class_context, var_args):
+        self._original_var_args = var_args

         # I don't think that dynamic append lookups should happen here. That
         # sounds more like something that should go to py__iter__.
-        self._original_var_args = self.var_args
-
-        if self.class_context.name.string_name in ['list', 'set'] \
-                and self.parent_context.get_root_context() == self.evaluator.builtins_module:
+        if class_context.py__name__() in ['list', 'set'] \
+                and parent_context.get_root_context() == evaluator.builtins_module:
             # compare the module path with the builtin name.
-            self.var_args = iterable.get_dynamic_array_instance(self)
+            if settings.dynamic_array_additions:
+                var_args = iterable.get_dynamic_array_instance(self, var_args)
+
+        super(CompiledInstance, self).__init__(evaluator, parent_context, class_context, var_args)

     @property
     def name(self):

@@ -249,35 +255,33 @@ class TreeInstance(AbstractInstanceContext):


 class AnonymousInstance(TreeInstance):
-    function_execution_cls = AnonymousInstanceFunctionExecution
-
     def __init__(self, evaluator, parent_context, class_context):
         super(AnonymousInstance, self).__init__(
             evaluator,
             parent_context,
             class_context,
-            var_args=AnonymousArguments(),
+            var_args=AnonymousInstanceArguments(self),
         )


 class CompiledInstanceName(compiled.CompiledName):
-    def __init__(self, evaluator, instance, parent_context, name):
-        super(CompiledInstanceName, self).__init__(evaluator, parent_context, name)
+    def __init__(self, evaluator, instance, klass, name):
+        super(CompiledInstanceName, self).__init__(
+            evaluator,
+            klass.parent_context,
+            name.string_name
+        )
         self._instance = instance
+        self._class = klass
+        self._class_member_name = name

     @iterator_to_context_set
     def infer(self):
-        for result_context in super(CompiledInstanceName, self).infer():
+        for result_context in self._class_member_name.infer():
             is_function = result_context.api_type == 'function'
             if result_context.tree_node is not None and is_function:
-                parent_context = result_context.parent_context
-                while parent_context.is_class():
-                    parent_context = parent_context.parent_context
-
-                yield BoundMethod(
-                    result_context.evaluator, self._instance, self.parent_context,
-                    parent_context, result_context.tree_node
-                )
+                yield BoundMethod(self._instance, self._class, result_context)
             else:
                 if is_function:
                     yield CompiledBoundMethod(result_context)

@@ -285,36 +289,59 @@ class CompiledInstanceName(compiled.CompiledName):
                     yield result_context


-class CompiledInstanceClassFilter(compiled.CompiledObjectFilter):
+class CompiledInstanceClassFilter(filters.AbstractFilter):
     name_class = CompiledInstanceName

-    def __init__(self, evaluator, instance, compiled_object):
-        super(CompiledInstanceClassFilter, self).__init__(
-            evaluator,
-            compiled_object,
-            is_instance=True,
-        )
+    def __init__(self, evaluator, instance, klass):
+        self._evaluator = evaluator
+        self._instance = instance
+        self._class = klass
+        self._class_filter = next(klass.get_filters(is_instance=True))
+
+    def get(self, name):
+        return self._convert(self._class_filter.get(name))
+
+    def values(self):
+        return self._convert(self._class_filter.values())
+
+    def _convert(self, names):
+        return [
+            CompiledInstanceName(self._evaluator, self._instance, self._class, n)
+            for n in names
+        ]
+
+
+class BoundMethod(AbstractFunction):
+    def __init__(self, instance, klass, function):
+        super(BoundMethod, self).__init__(
+            function.evaluator,
+            function.parent_context,
+            function.tree_node,
+        )
         self._instance = instance
+        self._class = klass
+        self._function = function

-    def _create_name(self, name):
-        return self.name_class(
-            self._evaluator, self._instance, self._compiled_object, name)
-
-
-class BoundMethod(FunctionContext):
-    def __init__(self, evaluator, instance, class_context, *args, **kwargs):
-        super(BoundMethod, self).__init__(evaluator, *args, **kwargs)
-        self._instance = instance
-        self._class_context = class_context
+    def py__class__(self):
+        return compiled.get_special_object(self.evaluator, u'BOUND_METHOD_CLASS')

     def get_function_execution(self, arguments=None):
         if arguments is None:
-            arguments = AnonymousArguments()
-            return AnonymousInstanceFunctionExecution(
-                self._instance, self.parent_context, self, arguments)
-        else:
-            return InstanceFunctionExecution(
-                self._instance, self.parent_context, self, arguments)
+            arguments = AnonymousInstanceArguments(self._instance)
+
+        arguments = InstanceArguments(self._instance, arguments)
+
+        if isinstance(self._function, compiled.CompiledObject):
+            # This is kind of weird, because it's coming from a compiled object
+            # and we're not sure if we want that in the future.
+            return FunctionExecutionContext(
+                self.evaluator, self.parent_context, self, arguments
+            )
+
+        return self._function.get_function_execution(arguments)
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self._function)


 class CompiledBoundMethod(compiled.CompiledObject):

@@ -326,11 +353,6 @@ class CompiledBoundMethod(compiled.CompiledObject):
         return list(super(CompiledBoundMethod, self).get_param_names())[1:]


-class InstanceNameDefinition(filters.TreeNameDefinition):
-    def infer(self):
-        return super(InstanceNameDefinition, self).infer()
-
-
 class SelfName(filters.TreeNameDefinition):
     """
     This name calculates the parent_context lazily.

@@ -345,65 +367,69 @@ class SelfName(filters.TreeNameDefinition):
         return self._instance.create_instance_context(self.class_context, self.tree_name)


-class LazyInstanceClassName(SelfName):
+class LazyInstanceClassName(object):
+    def __init__(self, instance, class_context, class_member_name):
+        self._instance = instance
+        self.class_context = class_context
+        self._class_member_name = class_member_name
+
     @iterator_to_context_set
     def infer(self):
-        for result_context in super(LazyInstanceClassName, self).infer():
+        for result_context in self._class_member_name.infer():
             if isinstance(result_context, FunctionContext):
                 # Classes are never used to resolve anything within the
                 # functions. Only other functions and modules will resolve
                 # those things.
-                parent_context = result_context.parent_context
-                while parent_context.is_class():
-                    parent_context = parent_context.parent_context
-
-                yield BoundMethod(
-                    result_context.evaluator, self._instance, self.class_context,
-                    parent_context, result_context.tree_node
-                )
+                yield BoundMethod(self._instance, self.class_context, result_context)
             else:
                 for c in apply_py__get__(result_context, self._instance):
                     yield c

+    def __getattr__(self, name):
+        return getattr(self._class_member_name, name)

-class InstanceClassFilter(filters.ParserTreeFilter):
-    name_class = LazyInstanceClassName

+class InstanceClassFilter(filters.AbstractFilter):
+    """
+    This filter is special in that it uses the class filter and wraps the
+    resulting names in LazyINstanceClassName. The idea is that the class name
+    filtering can be very flexible and always be reflected in instances.
+    """
     def __init__(self, evaluator, context, class_context, origin_scope):
-        super(InstanceClassFilter, self).__init__(
-            evaluator=evaluator,
-            context=context,
-            node_context=class_context,
-            origin_scope=origin_scope
-        )
+        self._instance = context
         self._class_context = class_context
+        self._class_filter = next(class_context.get_filters(
+            search_global=False,
+            origin_scope=origin_scope,
+            is_instance=True,
+        ))

-    def _equals_origin_scope(self):
-        node = self._origin_scope
-        while node is not None:
-            if node == self._parser_scope or node == self.context:
-                return True
-            node = get_parent_scope(node)
-        return False
+    def get(self, name):
+        return self._convert(self._class_filter.get(name))

-    def _access_possible(self, name):
-        return not name.value.startswith('__') or name.value.endswith('__') \
-            or self._equals_origin_scope()
+    def values(self):
+        return self._convert(self._class_filter.values())

-    def _filter(self, names):
-        names = super(InstanceClassFilter, self)._filter(names)
-        return [name for name in names if self._access_possible(name)]
-
-    def _convert_names(self, names):
-        return [self.name_class(self.context, self._class_context, name) for name in names]
+    def _convert(self, names):
+        return [LazyInstanceClassName(self._instance, self._class_context, n) for n in names]


-class SelfAttributeFilter(InstanceClassFilter):
+class SelfAttributeFilter(ClassFilter):
     """
     This class basically filters all the use cases where `self.*` was assigned.
     """
     name_class = SelfName

+    def __init__(self, evaluator, context, class_context, origin_scope):
+        super(SelfAttributeFilter, self).__init__(
+            evaluator=evaluator,
+            context=context,
+            node_context=class_context,
+            origin_scope=origin_scope,
+            is_instance=True,
+        )
+        self._class_context = class_context
+
     def _filter(self, names):
         names = self._filter_self_names(names)
         if isinstance(self._parser_scope, compiled.CompiledObject) and False:

@@ -422,19 +448,18 @@ class SelfAttributeFilter(InstanceClassFilter):
             if name.is_definition() and self._access_possible(name):
                 yield name

+    def _convert_names(self, names):
+        return [self.name_class(self.context, self._class_context, name) for name in names]
+
     def _check_flows(self, names):
         return names


-class InstanceVarArgs(AbstractArguments):
-    def __init__(self, execution_context, var_args):
-        self._execution_context = execution_context
+class InstanceArguments(AbstractArguments):
+    def __init__(self, instance, var_args):
+        self.instance = instance
         self._var_args = var_args

-    @memoize_method
-    def _get_var_args(self):
-        return self._var_args
-
     @property
     def argument_node(self):
         return self._var_args.argument_node

@@ -444,9 +469,15 @@ class InstanceVarArgs(AbstractArguments):
         return self._var_args.trailer

     def unpack(self, func=None):
-        yield None, LazyKnownContext(self._execution_context.instance)
-        for values in self._get_var_args().unpack(func):
+        yield None, LazyKnownContext(self.instance)
+        for values in self._var_args.unpack(func):
             yield values

     def get_calling_nodes(self):
-        return self._get_var_args().get_calling_nodes()
+        return self._var_args.get_calling_nodes()
+
+    def get_executed_params(self, execution_context):
+        if isinstance(self._var_args, AnonymousInstanceArguments):
+            return self._var_args.get_executed_params(execution_context)
+
+        return super(InstanceArguments, self).get_executed_params(execution_context)
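The new `BoundMethod`/`InstanceArguments` pair amounts to: keep the plain function object, and prepend the instance when the arguments are unpacked. A toy sketch of that idea follows; the class names are invented for the example and nothing is assumed about jedi's real argument objects.

```python
class InstanceArgumentsSketch(object):
    # Same idea as InstanceArguments.unpack in the diff: yield the instance
    # first, then defer to the wrapped argument object for the rest.
    def __init__(self, instance, var_args):
        self.instance = instance
        self._var_args = var_args

    def unpack(self):
        yield self.instance
        for value in self._var_args:
            yield value


class BoundMethodSketch(object):
    def __init__(self, instance, function):
        self._instance = instance
        self._function = function

    def call(self, arguments):
        # Binding only rewraps the arguments; the underlying function object
        # is reused unchanged, which is what the rewritten BoundMethod does.
        args = list(InstanceArgumentsSketch(self._instance, arguments).unpack())
        return self._function(*args)


def method(self, x):
    return (self, x)


print(BoundMethodSketch('instance', method).call([42]))  # -> ('instance', 42)
```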
@@ -30,7 +30,8 @@ from jedi.evaluate import recursion
 from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
     LazyTreeContext
 from jedi.evaluate.helpers import get_int_or_none, is_string, \
-    predefine_names, evaluate_call_of_leaf
+    predefine_names, evaluate_call_of_leaf, reraise_as_evaluator, \
+    EvaluatorKeyError
 from jedi.evaluate.utils import safe_property
 from jedi.evaluate.utils import to_list
 from jedi.evaluate.cache import evaluator_method_cache

@@ -82,10 +83,6 @@ class CompForContext(TreeContext):
     def from_comp_for(cls, parent_context, comp_for):
         return cls(parent_context.evaluator, parent_context, comp_for)

-    def __init__(self, evaluator, parent_context, comp_for):
-        super(CompForContext, self).__init__(evaluator, parent_context)
-        self.tree_node = comp_for
-
     def get_node(self):
         return self.tree_node

@@ -219,7 +216,9 @@ class ListComprehension(ComprehensionMixin, Sequence):
             return ContextSet(self)

         all_types = list(self.py__iter__())
-        return all_types[index].infer()
+        with reraise_as_evaluator(IndexError, TypeError):
+            lazy_context = all_types[index]
+        return lazy_context.infer()


 class SetComprehension(ComprehensionMixin, Sequence):

@@ -254,14 +253,24 @@ class DictComprehension(ComprehensionMixin, Sequence):

     @publish_method('items')
     def _imitate_items(self):
-        items = ContextSet.from_iterable(
-            FakeSequence(
-                self.evaluator, u'tuple'
-                (LazyKnownContexts(keys), LazyKnownContexts(values))
-            ) for keys, values in self._iterate()
-        )
+        lazy_contexts = [
+            LazyKnownContext(
+                FakeSequence(
+                    self.evaluator,
+                    u'tuple',
+                    [LazyKnownContexts(key),
+                     LazyKnownContexts(value)]
+                )
+            )
+            for key, value in self._iterate()
+        ]

-        return create_evaluated_sequence_set(self.evaluator, items, sequence_type=u'list')
+        return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts))
+
+    def exact_key_items(self):
+        # NOTE: A smarter thing can probably done here to achieve better
+        # completions, but at least like this jedi doesn't crash
+        return []


 class GeneratorComprehension(ComprehensionMixin, GeneratorBase):

@@ -293,13 +302,15 @@ class SequenceLiteralContext(Sequence):
                 if isinstance(k, compiled.CompiledObject) \
                         and k.execute_operation(compiled_obj_index, u'==').get_safe_value():
                     return self._defining_context.eval_node(value)
-            raise KeyError('No key found in dictionary %s.' % self)
+            raise EvaluatorKeyError('No key found in dictionary %s.' % self)

         # Can raise an IndexError
         if isinstance(index, slice):
             return ContextSet(self)
         else:
-            return self._defining_context.eval_node(self._items()[index])
+            with reraise_as_evaluator(TypeError, KeyError, IndexError):
+                node = self._items()[index]
+            return self._defining_context.eval_node(node)

     def py__iter__(self):
         """

@@ -340,21 +351,39 @@ class SequenceLiteralContext(Sequence):
             return []  # Direct closing bracket, doesn't contain items.

         if array_node.type == 'testlist_comp':
-            return array_node.children[::2]
+            # filter out (for now) pep 448 single-star unpacking
+            return [value for value in array_node.children[::2]
+                    if value.type != "star_expr"]
         elif array_node.type == 'dictorsetmaker':
             kv = []
             iterator = iter(array_node.children)
             for key in iterator:
-                op = next(iterator, None)
-                if op is None or op == ',':
-                    kv.append(key)  # A set.
-                else:
-                    assert op == ':'  # A dict.
-                    kv.append((key, next(iterator)))
+                if key == "**":
+                    # dict with pep 448 double-star unpacking
+                    # for now ignoring the values imported by **
+                    next(iterator)
                     next(iterator, None)  # Possible comma.
+                else:
+                    op = next(iterator, None)
+                    if op is None or op == ',':
+                        if key.type == "star_expr":
+                            # pep 448 single-star unpacking
+                            # for now ignoring values imported by *
+                            pass
+                        else:
+                            kv.append(key)  # A set.
+                    else:
+                        assert op == ':'  # A dict.
+                        kv.append((key, next(iterator)))
+                        next(iterator, None)  # Possible comma.
             return kv
         else:
-            return [array_node]
+            if array_node.type == "star_expr":
+                # pep 448 single-star unpacking
+                # for now ignoring values imported by *
+                return []
+            else:
+                return [array_node]

     def exact_key_items(self):
         """

@@ -413,7 +442,9 @@ class FakeSequence(_FakeArray):
         self._lazy_context_list = lazy_context_list

     def py__getitem__(self, index):
-        return self._lazy_context_list[index].infer()
+        with reraise_as_evaluator(IndexError, TypeError):
+            lazy_context = self._lazy_context_list[index]
+        return lazy_context.infer()

     def py__iter__(self):
         return self._lazy_context_list

@@ -450,7 +481,9 @@ class FakeDict(_FakeArray):
             except KeyError:
                 pass

-        return self._dct[index].infer()
+        with reraise_as_evaluator(KeyError):
+            lazy_context = self._dct[index]
+        return lazy_context.infer()

     @publish_method('values')
     def _values(self):

@@ -620,12 +653,9 @@ def _check_array_additions(context, sequence):
     return added_types


-def get_dynamic_array_instance(instance):
+def get_dynamic_array_instance(instance, arguments):
     """Used for set() and list() instances."""
-    if not settings.dynamic_array_additions:
-        return instance.var_args
-
-    ai = _ArrayInstance(instance)
+    ai = _ArrayInstance(instance, arguments)
     from jedi.evaluate import arguments
     return arguments.ValuesArguments([ContextSet(ai)])

@@ -641,9 +671,9 @@ class _ArrayInstance(object):
     and therefore doesn't need filters, `py__bool__` and so on, because
     we don't use these operations in `builtins.py`.
     """
-    def __init__(self, instance):
+    def __init__(self, instance, var_args):
         self.instance = instance
-        self.var_args = instance.var_args
+        self.var_args = var_args

     def py__iter__(self):
         var_args = self.var_args
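The PEP 448 handling above simply skips `star_expr` children instead of trying to infer them. A tiny stand-alone illustration of that filtering, using a namedtuple in place of a parser node (the node layout is an assumption made for the example):

```python
from collections import namedtuple

Node = namedtuple('Node', ['type', 'value'])


def literal_values(children):
    # Mirrors the testlist_comp branch above: every second child is a value,
    # and PEP 448 single-star unpackings are skipped for now instead of being
    # inferred.
    return [child for child in children[::2] if child.type != 'star_expr']


# [*rest, 1, 2] would roughly produce: star_expr, ',', number, ',', number
children = [
    Node('star_expr', '*rest'), Node('operator', ','),
    Node('number', '1'), Node('operator', ','),
    Node('number', '2'),
]
print([c.value for c in literal_values(children)])  # -> ['1', '2']
```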
@@ -38,11 +38,12 @@ py__doc__(include_call_signature: Returns the docstring for a context.

 """
 from jedi._compatibility import use_metaclass
+from jedi.parser_utils import get_parent_scope
 from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass
 from jedi.evaluate import compiled
 from jedi.evaluate.lazy_context import LazyKnownContext
 from jedi.evaluate.filters import ParserTreeFilter, TreeNameDefinition, \
-    ContextName, AnonymousInstanceParamName
+    ContextName
 from jedi.evaluate.base_context import ContextSet, iterator_to_context_set, \
     TreeContext

@@ -58,9 +59,10 @@ def apply_py__get__(context, base_context):


 class ClassName(TreeNameDefinition):
-    def __init__(self, parent_context, tree_name, name_context):
+    def __init__(self, parent_context, tree_name, name_context, apply_decorators):
         super(ClassName, self).__init__(parent_context, tree_name)
         self._name_context = name_context
+        self._apply_decorators = apply_decorators

     @iterator_to_context_set
     def infer(self):

@@ -72,16 +74,45 @@ class ClassName(TreeNameDefinition):
             self.parent_context.evaluator, self._name_context, self.tree_name)

         for result_context in inferred:
-            for c in apply_py__get__(result_context, self.parent_context):
-                yield c
+            if self._apply_decorators:
+                for c in apply_py__get__(result_context, self.parent_context):
+                    yield c
+            else:
+                yield result_context


 class ClassFilter(ParserTreeFilter):
     name_class = ClassName

+    def __init__(self, *args, **kwargs):
+        self._is_instance = kwargs.pop('is_instance')  # Python 2 :/
+        super(ClassFilter, self).__init__(*args, **kwargs)
+
     def _convert_names(self, names):
-        return [self.name_class(self.context, name, self._node_context)
-                for name in names]
+        return [
+            self.name_class(
+                parent_context=self.context,
+                tree_name=name,
+                name_context=self._node_context,
+                apply_decorators=not self._is_instance,
+            ) for name in names
+        ]
+
+    def _equals_origin_scope(self):
+        node = self._origin_scope
+        while node is not None:
+            if node == self._parser_scope or node == self.context:
+                return True
+            node = get_parent_scope(node)
+        return False
+
+    def _access_possible(self, name):
+        return not name.value.startswith('__') or name.value.endswith('__') \
+            or self._equals_origin_scope()
+
+    def _filter(self, names):
+        names = super(ClassFilter, self)._filter(names)
+        return [name for name in names if self._access_possible(name)]


 class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):

@@ -91,10 +122,6 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
     """
     api_type = u'class'

-    def __init__(self, evaluator, parent_context, classdef):
-        super(ClassContext, self).__init__(evaluator, parent_context=parent_context)
-        self.tree_node = classdef
-
     @evaluator_method_cache(default=())
     def py__mro__(self):
         def add(cls):

@@ -148,11 +175,6 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
     def py__class__(self):
         return compiled.builtin_from_name(self.evaluator, u'type')

-    def get_params(self):
-        from jedi.evaluate.context import AnonymousInstance
-        anon = AnonymousInstance(self.evaluator, self.parent_context, self)
-        return [AnonymousInstanceParamName(anon, param.name) for param in self.funcdef.get_params()]
-
     def get_filters(self, search_global, until_position=None, origin_scope=None, is_instance=False):
         if search_global:
             yield ParserTreeFilter(

@@ -169,7 +191,9 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)):
         else:
             yield ClassFilter(
                 self.evaluator, self, node_context=cls,
-                origin_scope=origin_scope)
+                origin_scope=origin_scope,
+                is_instance=is_instance
+            )

     def is_class(self):
         return True
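The `_access_possible` rule that moved onto `ClassFilter` hides double-underscore (non-dunder) attributes unless the lookup originates inside the defining class. A one-function sketch of the same rule, where a boolean flag stands in for the origin-scope walk:

```python
def access_possible(name, accessed_from_inside_class=False):
    # Same rule as ClassFilter._access_possible: private (double-underscore,
    # non-dunder) names are only visible from within the defining scope.
    is_private = name.startswith('__') and not name.endswith('__')
    return not is_private or accessed_from_inside_class


print(access_possible('calculate'))      # True  - public attribute
print(access_possible('__repr__'))       # True  - dunder stays visible
print(access_possible('__cache'))        # False - private, seen from outside
print(access_possible('__cache', True))  # True  - private, seen from inside
```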
@@ -43,8 +43,11 @@ class ModuleContext(TreeContext):
     parent_context = None

     def __init__(self, evaluator, module_node, path, code_lines):
-        super(ModuleContext, self).__init__(evaluator, parent_context=None)
-        self.tree_node = module_node
+        super(ModuleContext, self).__init__(
+            evaluator,
+            parent_context=None,
+            tree_node=module_node
+        )
         self._path = path
         self.code_lines = code_lines

@@ -186,13 +189,17 @@ class ModuleContext(TreeContext):
         Lists modules in the directory of this module (if this module is a
         package).
         """
-        path = self._path
         names = {}
-        if path is not None and path.endswith(os.path.sep + '__init__.py'):
-            mods = iter_modules([os.path.dirname(path)])
-            for module_loader, name, is_pkg in mods:
-                # It's obviously a relative import to the current module.
-                names[name] = SubModuleName(self, name)
+        try:
+            method = self.py__path__
+        except AttributeError:
+            pass
+        else:
+            for path in method():
+                mods = iter_modules([path])
+                for module_loader, name, is_pkg in mods:
+                    # It's obviously a relative import to the current module.
+                    names[name] = SubModuleName(self, name)

         # TODO add something like this in the future, its cleaner than the
         # import hacks.
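The rewritten sub-module lookup iterates whatever `py__path__()` reports and feeds each directory to `iter_modules`. The snippet below reproduces the same lookup with the standard library alone; it is an illustration, not jedi's code.

```python
import email  # any stdlib package works as a demo target
import pkgutil


def list_submodules(package_paths):
    # Same shape as the rewritten lookup above: iterate every directory the
    # package reports via py__path__() and collect the module names found.
    names = {}
    for path in package_paths:
        for module_finder, name, is_pkg in pkgutil.iter_modules([path]):
            names[name] = is_pkg
    return names


# Prints the first few submodule names of the stdlib ``email`` package.
print(sorted(list_submodules(list(email.__path__)))[:3])
```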
@@ -3,27 +3,21 @@ from itertools import chain

 from jedi.evaluate.cache import evaluator_method_cache
 from jedi.evaluate import imports
-from jedi.evaluate.filters import DictFilter, AbstractNameDefinition
-from jedi.evaluate.base_context import TreeContext, ContextSet
+from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, ContextNameMixin
+from jedi.evaluate.base_context import Context


-class ImplicitNSName(AbstractNameDefinition):
+class ImplicitNSName(ContextNameMixin, AbstractNameDefinition):
     """
     Accessing names for implicit namespace packages should infer to nothing.
     This object will prevent Jedi from raising exceptions
     """
     def __init__(self, implicit_ns_context, string_name):
-        self.parent_context = implicit_ns_context
+        self._context = implicit_ns_context
         self.string_name = string_name

-    def infer(self):
-        return ContextSet(self.parent_context)
-
-    def get_root_context(self):
-        return self.parent_context
-

-class ImplicitNamespaceContext(TreeContext):
+class ImplicitNamespaceContext(Context):
     """
     Provides support for implicit namespace packages
     """

@@ -56,9 +50,11 @@ class ImplicitNamespaceContext(TreeContext):
         """
         return self._fullname

-    @property
     def py__path__(self):
-        return lambda: [self.paths]
+        return [self.paths]
+
+    def py__name__(self):
+        return self._fullname

     @evaluator_method_cache()
     def _sub_modules_dict(self):
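The `py__path__` change replaces a property that returned a lambda with a plain method, so callers keep writing `context.py__path__()` while `try`/`except AttributeError` probing (as in the module hunk above) behaves uniformly. A toy before/after, with invented class names:

```python
class OldStyle(object):
    paths = ['/tmp/pkg']

    @property
    def py__path__(self):
        # Property returning a callable: the call syntax still works, but the
        # attribute access itself can never raise AttributeError.
        return lambda: [self.paths]


class NewStyle(object):
    paths = ['/tmp/pkg']

    def py__path__(self):
        # Plain method: identical call syntax, and hasattr()/try-except on the
        # attribute now means "this context behaves like a package".
        return [self.paths]


print(OldStyle().py__path__())  # -> [['/tmp/pkg']]
print(NewStyle().py__path__())  # -> [['/tmp/pkg']]
```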
@@ -47,13 +47,14 @@ _numpy_doc_string_cache = None

 def _get_numpy_doc_string_cls():
     global _numpy_doc_string_cache
+    if isinstance(_numpy_doc_string_cache, ImportError):
+        raise _numpy_doc_string_cache
     try:
         from numpydoc.docscrape import NumpyDocString
         _numpy_doc_string_cache = NumpyDocString
     except ImportError as e:
         _numpy_doc_string_cache = e
-    if isinstance(_numpy_doc_string_cache, ImportError):
-        raise _numpy_doc_string_cache
+        raise
     return _numpy_doc_string_cache


@@ -67,7 +68,7 @@ def _search_param_in_numpydocstr(docstr, param_str):
         return []
     for p_name, p_type, p_descr in params:
         if p_name == param_str:
-            m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
+            m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
             if m:
                 p_type = m.group(1)
             return list(_expand_typestr(p_type))

@@ -102,11 +103,11 @@ def _expand_typestr(type_str):
     Attempts to interpret the possible types in `type_str`
     """
     # Check if alternative types are specified with 'or'
-    if re.search('\\bor\\b', type_str):
+    if re.search(r'\bor\b', type_str):
         for t in type_str.split('or'):
             yield t.split('of')[0].strip()
     # Check if like "list of `type`" and set type to list
-    elif re.search('\\bof\\b', type_str):
+    elif re.search(r'\bof\b', type_str):
         yield type_str.split('of')[0]
     # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'}
     elif type_str.startswith('{'):

@@ -193,7 +194,7 @@ def _evaluate_for_statement_string(module_context, string):
     if string is None:
         return []

-    for element in re.findall('((?:\w+\.)*\w+)\.', string):
+    for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
         # Try to import module part in dotted name.
         # (e.g., 'threading' in 'threading.Thread').
         string = 'import %s\n' % element + string

@@ -214,6 +215,9 @@ def _evaluate_for_statement_string(module_context, string):
     except (AttributeError, IndexError):
         return []

+    if stmt.type not in ('name', 'atom', 'atom_expr'):
+        return []
+
     from jedi.evaluate.context import FunctionContext
     function_context = FunctionContext(
         module_context.evaluator,

@@ -262,7 +266,8 @@ def _execute_array_values(evaluator, array):

 @evaluator_method_cache()
 def infer_param(execution_context, param):
-    from jedi.evaluate.context.instance import AnonymousInstanceFunctionExecution
+    from jedi.evaluate.context.instance import InstanceArguments
+    from jedi.evaluate.context import FunctionExecutionContext

     def eval_docstring(docstring):
         return ContextSet.from_iterable(

@@ -276,9 +281,10 @@ def infer_param(execution_context, param):
         return NO_CONTEXTS

     types = eval_docstring(execution_context.py__doc__())
-    if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \
-            execution_context.function_context.name.string_name == '__init__':
-        class_context = execution_context.instance.class_context
+    if isinstance(execution_context, FunctionExecutionContext) \
+            and isinstance(execution_context.var_args, InstanceArguments) \
+            and execution_context.function_context.py__name__() == '__init__':
+        class_context = execution_context.var_args.instance.class_context
         types |= eval_docstring(class_context.py__doc__())

     return types
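The docstring hunks mostly switch these patterns to raw strings; the behaviour of the `optional`-stripping regex itself is unchanged. A quick demonstration of what it extracts from NumPy-style type descriptions:

```python
import re

# The raw-string regex from the diff, applied to a few NumPy-style type
# descriptions: the trailing ", optional" suffix is dropped, the rest kept.
pattern = r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$'

for type_str in ('int', 'int, optional', 'list of str, optional'):
    m = re.match(pattern, type_str)
    print(type_str, '->', m.group(1))
# int -> int
# int, optional -> int
# list of str, optional -> list of str
```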
@@ -28,22 +28,30 @@ from jedi.evaluate.helpers import is_stdlib_path
 from jedi.evaluate.utils import to_list
 from jedi.parser_utils import get_parent_scope
 from jedi.evaluate.context import ModuleContext, instance
-from jedi.evaluate.base_context import ContextSet
+from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
+from jedi.evaluate import recursion


 MAX_PARAM_SEARCHES = 20


-class MergedExecutedParams(object):
+class DynamicExecutedParams(object):
     """
     Simulates being a parameter while actually just being multiple params.
     """
-    def __init__(self, executed_params):
+
+    def __init__(self, evaluator, executed_params):
+        self.evaluator = evaluator
         self._executed_params = executed_params

     def infer(self):
-        return ContextSet.from_sets(p.infer() for p in self._executed_params)
+        with recursion.execution_allowed(self.evaluator, self) as allowed:
+            # We need to catch recursions that may occur, because an
+            # anonymous functions can create an anonymous parameter that is
+            # more or less self referencing.
+            if allowed:
+                return ContextSet.from_sets(p.infer() for p in self._executed_params)
+            return NO_CONTEXTS


 @debug.increase_indent

@@ -91,10 +99,10 @@ def search_params(evaluator, execution_context, funcdef):
         )
         if function_executions:
             zipped_params = zip(*list(
-                function_execution.get_params()
+                function_execution.get_executed_params()
                 for function_execution in function_executions
             ))
-            params = [MergedExecutedParams(executed_params) for executed_params in zipped_params]
+            params = [DynamicExecutedParams(evaluator, executed_params) for executed_params in zipped_params]
             # Evaluate the ExecutedParams to types.
         else:
             return create_default_params(execution_context, funcdef)

@@ -200,7 +208,7 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer):
             # Here we're trying to find decorators by checking the first
             # parameter. It's not very generic though. Should find a better
             # solution that also applies to nested decorators.
-            params = value.parent_context.get_params()
+            params = value.parent_context.get_executed_params()
             if len(params) != 1:
                 continue
             values = params[0].infer()
@@ -38,12 +38,6 @@ class AbstractNameDefinition(object):
 return '<%s: %s>' % (self.__class__.__name__, self.string_name)
 return '<%s: %s@%s>' % (self.__class__.__name__, self.string_name, self.start_pos)

-def execute(self, arguments):
-return self.infer().execute(arguments)

-def execute_evaluated(self, *args, **kwargs):
-return self.infer().execute_evaluated(*args, **kwargs)

 def is_import(self):
 return False

@@ -142,24 +136,11 @@ class ParamName(AbstractTreeName):
 return self.get_param().infer()

 def get_param(self):
-params = self.parent_context.get_params()
+params = self.parent_context.get_executed_params()
 param_node = search_ancestor(self.tree_name, 'param')
 return params[param_node.position_index]


-class AnonymousInstanceParamName(ParamName):
-def infer(self):
-param_node = search_ancestor(self.tree_name, 'param')
-# TODO I think this should not belong here. It's not even really true,
-# because classmethod and other descriptors can change it.
-if param_node.position_index == 0:
-# This is a speed optimization, to return the self param (because
-# it's known). This only affects anonymous instances.
-return ContextSet(self.parent_context.instance)
-else:
-return self.get_param().infer()


 class AbstractFilter(object):
 _until_position = None

@@ -272,10 +253,6 @@ class FunctionExecutionFilter(ParserTreeFilter):
 yield TreeNameDefinition(self.context, name)


-class AnonymousInstanceFunctionExecutionFilter(FunctionExecutionFilter):
-param_name = AnonymousInstanceParamName


 class GlobalNameFilter(AbstractUsedNamesFilter):
 def __init__(self, context, parser_scope):
 super(GlobalNameFilter, self).__init__(context, parser_scope)

@@ -60,7 +60,8 @@ def reachability_check(context, context_scope, node, origin_scope=None):
 if not branch_matches and origin_keyword == 'else' \
 and node_keyword == 'except':
 return UNREACHABLE
-break
+if branch_matches:
+break

 # Direct parents get resolved, we filter scopes that are separate
 # branches. This makes sense for autocompletion and static analysis.
@@ -9,7 +9,6 @@ from parso.python import tree

 from jedi._compatibility import unicode
 from jedi.parser_utils import get_parent_scope
-from jedi.evaluate.compiled import CompiledObject


 def is_stdlib_path(path):
@@ -20,7 +19,7 @@ def is_stdlib_path(path):
 return False

 base_path = os.path.join(sys.prefix, 'lib', 'python')
-return bool(re.match(re.escape(base_path) + '\d.\d', path))
+return bool(re.match(re.escape(base_path) + r'\d.\d', path))


 def deep_ast_copy(obj):
@@ -65,6 +64,10 @@ def evaluate_call_of_leaf(context, leaf, cut_own_trailer=False):
 The option ``cut_own_trailer`` must be set to true for the second purpose.
 """
 trailer = leaf.parent
+if trailer.type == 'fstring':
+from jedi.evaluate import compiled
+return compiled.get_string_context_set(context.evaluator)

 # The leaf may not be the last or first child, because there exist three
 # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
 # we should not match anything more than x.
@@ -184,6 +187,7 @@ def predefine_names(context, flow_scope, dct):


 def is_compiled(context):
+from jedi.evaluate.compiled import CompiledObject
 return isinstance(context, CompiledObject)


@@ -212,3 +216,24 @@ def get_int_or_none(context):

 def is_number(context):
 return _get_safe_value_or_none(context, (int, float)) is not None


+class EvaluatorTypeError(Exception):
+pass


+class EvaluatorIndexError(Exception):
+pass


+class EvaluatorKeyError(Exception):
+pass


+@contextmanager
+def reraise_as_evaluator(*exception_classes):
+try:
+yield
+except exception_classes as e:
+new_exc_cls = globals()['Evaluator' + e.__class__.__name__]
+raise new_exc_cls(e)
@@ -17,7 +17,8 @@ from parso.python import tree
 from parso.tree import search_ancestor
 from parso import python_bytes_to_unicode

-from jedi._compatibility import unicode, ImplicitNSInfo, force_unicode
+from jedi._compatibility import (FileNotFoundError, ImplicitNSInfo,
+force_unicode, unicode)
 from jedi import debug
 from jedi import settings
 from jedi.parser_utils import get_cached_code_lines
@@ -25,7 +26,7 @@ from jedi.evaluate import sys_path
 from jedi.evaluate import helpers
 from jedi.evaluate import compiled
 from jedi.evaluate import analysis
-from jedi.evaluate.utils import unite, dotted_from_fs_path
+from jedi.evaluate.utils import unite
 from jedi.evaluate.cache import evaluator_method_cache
 from jedi.evaluate.filters import AbstractNameDefinition
 from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
@@ -263,8 +264,11 @@ class Importer(object):
 )

 def sys_path_with_modifications(self):
-sys_path_mod = self._evaluator.get_sys_path() \
-+ sys_path.check_sys_path_modifications(self.module_context)
+sys_path_mod = (
+self._evaluator.get_sys_path()
++ sys_path.check_sys_path_modifications(self.module_context)
+)

 if self.import_path and self.file_path is not None \
 and self._evaluator.environment.version_info.major == 2:
@@ -274,8 +278,9 @@ class Importer(object):
 return sys_path_mod

 def follow(self):
-if not self.import_path:
+if not self.import_path or not self._evaluator.infer_enabled:
 return NO_CONTEXTS

 return self._do_import(self.import_path, self.sys_path_with_modifications())

 def _do_import(self, import_path, sys_path):
@@ -299,6 +304,14 @@ class Importer(object):
 # Old style
 return self._do_import(('flaskext',) + import_path[2:], sys_path)

+if import_parts[0] in settings.auto_import_modules:
+module = _load_module(
+self._evaluator,
+import_names=import_parts,
+sys_path=sys_path,
+)
+return ContextSet(module)

 module_name = '.'.join(import_parts)
 try:
 return ContextSet(self._evaluator.module_cache.get(module_name))
@@ -340,7 +353,8 @@ class Importer(object):
 code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
 string=import_parts[-1],
 path=path,
-full_name=module_name
+full_name=module_name,
+is_global_search=False,
 )
 if module_path is not None:
 break
@@ -348,13 +362,14 @@ class Importer(object):
 _add_error(self.module_context, import_path[-1])
 return NO_CONTEXTS
 else:
-debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
+debug.dbg('global search_module %s in %s', import_parts[-1], self.file_path)
 # Override the sys.path. It works only good that way.
 # Injecting the path directly into `find_module` did not work.
 code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info(
 string=import_parts[-1],
 full_name=module_name,
 sys_path=sys_path,
+is_global_search=True,
 )
 if module_path is None:
 # The module is not a package.
@@ -363,7 +378,7 @@ class Importer(object):

 module = _load_module(
 self._evaluator, module_path, code, sys_path,
-module_name=module_name,
+import_names=import_parts,
 safe_module_name=True,
 )

@@ -463,9 +478,13 @@ class Importer(object):


 def _load_module(evaluator, path=None, code=None, sys_path=None,
-module_name=None, safe_module_name=False):
+import_names=None, safe_module_name=False):
+if import_names is None:
+dotted_name = None
+else:
+dotted_name = '.'.join(import_names)
 try:
-return evaluator.module_cache.get(module_name)
+return evaluator.module_cache.get(dotted_name)
 except KeyError:
 pass
 try:
@@ -484,12 +503,10 @@ def _load_module(evaluator, path=None, code=None, sys_path=None,
 if sys_path is None:
 sys_path = evaluator.get_sys_path()

-dotted_path = path and dotted_from_fs_path(path, sys_path)
-if path is not None and path.endswith(('.py', '.zip', '.egg')) \
-and dotted_path not in settings.auto_import_modules:
+if path is not None and path.endswith(('.py', '.zip', '.egg')):

 module_node = evaluator.parse(
-code=code, path=path, cache=True, diff_cache=True,
+code=code, path=path, cache=True,
+diff_cache=settings.fast_parser,
 cache_path=settings.cache_directory)

 from jedi.evaluate.context import ModuleContext
@@ -499,10 +516,11 @@ def _load_module(evaluator, path=None, code=None, sys_path=None,
 code_lines=get_cached_code_lines(evaluator.grammar, path),
 )
 else:
-module = compiled.load_module(evaluator, path=path, sys_path=sys_path)
+assert dotted_name is not None
+module = compiled.load_module(evaluator, dotted_name=dotted_name, sys_path=sys_path)

-if module is not None and module_name is not None:
-add_module_to_cache(evaluator, module_name, module, safe=safe_module_name)
+if module is not None and dotted_name is not None:
+add_module_to_cache(evaluator, dotted_name, module, safe=safe_module_name)

 return module

@@ -533,14 +551,19 @@ def get_modules_containing_name(evaluator, modules, name):
 yield path

 def check_fs(path):
-with open(path, 'rb') as f:
+try:
+f = open(path, 'rb')
+except FileNotFoundError:
+return
+with f:
 code = python_bytes_to_unicode(f.read(), errors='replace')
 if name in code:
 e_sys_path = evaluator.get_sys_path()
-module_name = sys_path.dotted_path_in_sys_path(e_sys_path, path)
+import_names = sys_path.dotted_path_in_sys_path(e_sys_path, path)
 module = _load_module(
 evaluator, path, code,
-sys_path=e_sys_path, module_name=module_name
+sys_path=e_sys_path,
+import_names=import_names,
 )
 return module

@@ -81,6 +81,9 @@ def factory(typing_name, indextypes):
 class Dict(MutableMapping, dict):
 pass

+class DefaultDict(MutableMapping, dict):
+pass

 dct = {
 "Sequence": Sequence,
 "MutableSequence": MutableSequence,
@@ -96,5 +99,6 @@ def factory(typing_name, indextypes):
 "ItemsView": ItemsView,
 "ValuesView": ValuesView,
 "Dict": Dict,
+"DefaultDict": DefaultDict,
 }
 return dct[typing_name]
@@ -1,4 +1,6 @@
 from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
+from jedi.common.utils import monkeypatch


 class AbstractLazyContext(object):
 def __init__(self, data):
@@ -40,12 +42,8 @@ class LazyTreeContext(AbstractLazyContext):
 self._predefined_names = dict(context.predefined_names)

 def infer(self):
-old, self._context.predefined_names = \
-self._context.predefined_names, self._predefined_names
-try:
+with monkeypatch(self._context, 'predefined_names', self._predefined_names):
 return self._context.eval_node(self.data)
-finally:
-self._context.predefined_names = old


 def get_merged_lazy_context(lazy_contexts):
@@ -41,7 +41,7 @@ class ExecutedParam(object):
 return '<%s: %s>' % (self.__class__.__name__, self.string_name)


-def get_params(execution_context, var_args):
+def get_executed_params(execution_context, var_args):
 result_params = []
 param_dict = {}
 funcdef = execution_context.tree_node
@@ -157,8 +157,8 @@ def infer_param(execution_context, param):
 "Comments length != Params length %s %s",
 params_comments, all_params
 )
-from jedi.evaluate.context.instance import BaseInstanceFunctionExecution
-if isinstance(execution_context, BaseInstanceFunctionExecution):
+from jedi.evaluate.context.instance import InstanceArguments
+if isinstance(execution_context.var_args, InstanceArguments):
 if index == 0:
 # Assume it's self, which is already handled
 return NO_CONTEXTS
@@ -65,7 +65,7 @@ def execution_allowed(evaluator, node):

 if node in pushed_nodes:
 debug.warning('catched stmt recursion: %s @%s', node,
-node.start_pos)
+getattr(node, 'start_pos', None))
 yield False
 else:
 try:
@@ -77,14 +77,14 @@ def execution_allowed(evaluator, node):

 def execution_recursion_decorator(default=NO_CONTEXTS):
 def decorator(func):
-def wrapper(execution, **kwargs):
-detector = execution.evaluator.execution_recursion_detector
-allowed = detector.push_execution(execution)
+def wrapper(self, **kwargs):
+detector = self.evaluator.execution_recursion_detector
+allowed = detector.push_execution(self)
 try:
 if allowed:
 result = default
 else:
-result = func(execution, **kwargs)
+result = func(self, **kwargs)
 finally:
 detector.pop_execution()
 return result
@@ -9,21 +9,18 @@ Note that this module exists only to implement very specific functionality in
 the standard library. The usual way to understand the standard library is the
 compiled module that returns the types for C-builtins.
 """
-import re

 import parso

 from jedi._compatibility import force_unicode
 from jedi import debug
-from jedi.evaluate.arguments import ValuesArguments
+from jedi.evaluate.arguments import ValuesArguments, repack_with_argument_clinic
 from jedi.evaluate import analysis
 from jedi.evaluate import compiled
-from jedi.evaluate.context.instance import InstanceFunctionExecution, \
-AbstractInstanceContext, CompiledInstance, BoundMethod, \
-AnonymousInstanceFunctionExecution
+from jedi.evaluate.context.instance import \
+AbstractInstanceContext, CompiledInstance, BoundMethod, InstanceArguments
 from jedi.evaluate.base_context import ContextualizedNode, \
 NO_CONTEXTS, ContextSet
-from jedi.evaluate.context import ClassContext, ModuleContext
+from jedi.evaluate.context import ClassContext, ModuleContext, FunctionExecutionContext
 from jedi.evaluate.context import iterable
 from jedi.evaluate.lazy_context import LazyTreeContext
 from jedi.evaluate.syntax_tree import is_string
@@ -72,7 +69,7 @@ def execute(evaluator, obj, arguments):
 except KeyError:
 pass
 else:
-return func(evaluator, obj, arguments)
+return func(evaluator, obj, arguments=arguments)
 raise NotInStdLib()


@@ -89,42 +86,22 @@ def argument_clinic(string, want_obj=False, want_context=False, want_arguments=F
 """
 Works like Argument Clinic (PEP 436), to validate function params.
 """
-clinic_args = []
-allow_kwargs = False
-optional = False
-while string:
-# Optional arguments have to begin with a bracket. And should always be
-# at the end of the arguments. This is therefore not a proper argument
-# clinic implementation. `range()` for exmple allows an optional start
-# value at the beginning.
-match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
-string = string[len(match.group(0)):]
-if not match.group(2): # A slash -> allow named arguments
-allow_kwargs = True
-continue
-optional = optional or bool(match.group(1))
-word = match.group(2)
-clinic_args.append((word, optional, allow_kwargs))

 def f(func):
-def wrapper(evaluator, obj, arguments):
+@repack_with_argument_clinic(string, keep_arguments_param=True)
+def wrapper(evaluator, obj, *args, **kwargs):
+arguments = kwargs.pop('arguments')
+assert not kwargs # Python 2...
 debug.dbg('builtin start %s' % obj, color='MAGENTA')
 result = NO_CONTEXTS
-try:
-lst = list(arguments.eval_argument_clinic(clinic_args))
-except ValueError:
-pass
-else:
-kwargs = {}
-if want_context:
-kwargs['context'] = arguments.context
-if want_obj:
-kwargs['obj'] = obj
-if want_arguments:
-kwargs['arguments'] = arguments
-result = func(evaluator, *lst, **kwargs)
-finally:
-debug.dbg('builtin end: %s', result, color='MAGENTA')
+if want_context:
+kwargs['context'] = arguments.context
+if want_obj:
+kwargs['obj'] = obj
+if want_arguments:
+kwargs['arguments'] = arguments
+result = func(evaluator, *args, **kwargs)
+debug.dbg('builtin end: %s', result, color='MAGENTA')
 return result

 return wrapper
@@ -187,10 +164,11 @@ class SuperInstance(AbstractInstanceContext):
 @argument_clinic('[type[, obj]], /', want_context=True)
 def builtins_super(evaluator, types, objects, context):
 # TODO make this able to detect multiple inheritance super
-if isinstance(context, (InstanceFunctionExecution,
-AnonymousInstanceFunctionExecution)):
-su = context.instance.py__class__().py__bases__()
+if isinstance(context, FunctionExecutionContext):
+if isinstance(context.var_args, InstanceArguments):
+su = context.var_args.instance.py__class__().py__bases__()
 return su[0].infer().execute_evaluated()

 return NO_CONTEXTS


@@ -334,8 +312,8 @@ _implemented = {
 'deepcopy': _return_first_param,
 },
 'json': {
-'load': lambda *args: NO_CONTEXTS,
-'loads': lambda *args: NO_CONTEXTS,
+'load': lambda evaluator, obj, arguments: NO_CONTEXTS,
+'loads': lambda evaluator, obj, arguments: NO_CONTEXTS,
 },
 'collections': {
 'namedtuple': collections_namedtuple,
@@ -68,16 +68,10 @@ def eval_node(context, element):
 debug.dbg('eval_node %s@%s', element, element.start_pos)
 evaluator = context.evaluator
 typ = element.type
-if typ in ('name', 'number', 'string', 'atom', 'strings'):
+if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
 return eval_atom(context, element)
-elif typ == 'keyword':
-# For False/True/None
-if element.value in ('False', 'True', 'None'):
-return ContextSet(compiled.builtin_from_name(evaluator, element.value))
-# else: print e.g. could be evaluated like this in Python 2.7
-return NO_CONTEXTS
 elif typ == 'lambdef':
-return ContextSet(FunctionContext(evaluator, context, element))
+return ContextSet(FunctionContext.from_context(context, element))
 elif typ == 'expr_stmt':
 return eval_expr_stmt(context, element)
 elif typ in ('power', 'atom_expr'):
@@ -207,6 +201,18 @@ def eval_atom(context, atom):
 position=stmt.start_pos,
 search_global=True
 )
+elif atom.type == 'keyword':
+# For False/True/None
+if atom.value in ('False', 'True', 'None'):
+return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value))
+elif atom.value == 'print':
+# print e.g. could be evaluated like this in Python 2.7
+return NO_CONTEXTS
+elif atom.value == 'yield':
+# Contrary to yield from, yield can just appear alone to return a
+# value when used with `.send()`.
+return NO_CONTEXTS
+assert False, 'Cannot evaluate the keyword %s' % atom

 elif isinstance(atom, tree.Literal):
 string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
@@ -249,7 +255,8 @@ def eval_atom(context, atom):
 array_node_c = array_node.children
 except AttributeError:
 array_node_c = []
-if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
+if c[0] == '{' and (array_node == '}' or ':' in array_node_c or
+'**' in array_node_c):
 context = iterable.DictLiteralContext(context.evaluator, context, atom)
 else:
 context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
@@ -264,7 +271,7 @@ def eval_expr_stmt(context, stmt, seek_name=None):
 # necessary.
 if not allowed and context.get_root_context() == context.evaluator.builtins_module:
 try:
-instance = context.instance
+instance = context.var_args.instance
 except AttributeError:
 pass
 else:
@@ -574,14 +581,10 @@ def _apply_decorators(context, node):
 decoratee_context = ClassContext(
 context.evaluator,
 parent_context=context,
-classdef=node
+tree_node=node
 )
 else:
-decoratee_context = FunctionContext(
-context.evaluator,
-parent_context=context,
-funcdef=node
-)
+decoratee_context = FunctionContext.from_context(context, node)
 initial = values = ContextSet(decoratee_context)
 for dec in reversed(node.get_decorators()):
 debug.dbg('decorator: %s %s', dec, values)
@@ -198,7 +198,7 @@ def _get_buildout_script_paths(search_path):

 def dotted_path_in_sys_path(sys_path, module_path):
 """
-Returns the dotted path inside a sys.path.
+Returns the dotted path inside a sys.path as a list of names.
 """
 # First remove the suffix.
 for suffix in all_suffixes():
@@ -221,6 +221,6 @@ def dotted_path_in_sys_path(sys_path, module_path):
 for string in split:
 if not string or '.' in string:
 return None
-return '.'.join(split)
+return split

 return None
@@ -11,7 +11,7 @@ from jedi._compatibility import reraise
 _sep = os.path.sep
 if os.path.altsep is not None:
 _sep += os.path.altsep
-_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
+_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
 del _sep


@@ -117,38 +117,3 @@ def indent_block(text, indention='    '):
 text = text[:-1]
 lines = text.split('\n')
 return '\n'.join(map(lambda s: indention + s, lines)) + temp


-def dotted_from_fs_path(fs_path, sys_path):
-"""
-Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
-compares the path with sys.path and then returns the dotted_path. If the
-path is not in the sys.path, just returns None.
-"""
-if os.path.basename(fs_path).startswith('__init__.'):
-# We are calculating the path. __init__ files are not interesting.
-fs_path = os.path.dirname(fs_path)

-# prefer
-# - UNIX
-#   /path/to/pythonX.Y/lib-dynload
-#   /path/to/pythonX.Y/site-packages
-# - Windows
-#   C:\path\to\DLLs
-#   C:\path\to\Lib\site-packages
-# over
-# - UNIX
-#   /path/to/pythonX.Y
-# - Windows
-#   C:\path\to\Lib
-path = ''
-for s in sys_path:
-if (fs_path.startswith(s) and len(path) < len(s)):
-path = s

-# - Window
-# X:\path\to\lib-dynload/datetime.pyd => datetime
-module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
-# - Window
-# Replace like X:\path\to\something/foo/bar.py
-return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
@@ -1,3 +1,4 @@
+import re
 import textwrap
 from inspect import cleandoc

@@ -88,10 +89,12 @@ def get_flow_branch_keyword(flow_node, node):
 keyword = first_leaf
 return 0


 def get_statement_of_position(node, pos):
 for c in node.children:
 if c.start_pos <= pos <= c.end_pos:
-if c.type not in ('decorated', 'simple_stmt', 'suite') \
+if c.type not in ('decorated', 'simple_stmt', 'suite',
+'async_stmt', 'async_funcdef') \
 and not isinstance(c, (tree.Flow, tree.ClassOrFunc)):
 return c
 else:
@@ -156,7 +159,12 @@ def get_call_signature(funcdef, width=72, call_string=None):
 p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
 else:
 p = funcdef.children[2].get_code()
-code = call_string + p
+p = re.sub(r'\s+', ' ', p)
+if funcdef.annotation:
+rtype = " ->" + funcdef.annotation.get_code()
+else:
+rtype = ""
+code = call_string + p + rtype

 return '\n'.join(textwrap.wrap(code, width))

@@ -177,6 +185,8 @@ def get_doc_with_call_signature(scope_node):
 doc = clean_scope_docstring(scope_node)
 if call_signature is None:
 return doc
+if not doc:
+return call_signature
 return '%s\n\n%s' % (call_signature, doc)

@@ -144,7 +144,8 @@ Check for `isinstance` and other information to infer a type.
 """

 auto_import_modules = [
-'hashlib', # setattr
+'hashlib', # hashlib is mostly using setattr, which jedi doesn't understand
+'gi', # This third-party repository (GTK stuff) doesn't really work with jedi
 ]
 """
 Modules that are not analyzed but imported, although they contain Python code.
@@ -113,7 +113,7 @@ def setup_readline(namespace_module=__main__):
 # this code. This didn't use to be an issue until 3.3. Starting with
 # 3.4 this is different, it always overwrites the completer if it's not
 # already imported here.
-import rlcompleter
+import rlcompleter # noqa: F401
 import readline
 except ImportError:
 print("Jedi: Module readline not available.")
@@ -138,5 +138,5 @@ def version_info():
 """
 Version = namedtuple('Version', 'major, minor, micro')
 from jedi import __version__
-tupl = re.findall('[a-z]+|\d+', __version__)
+tupl = re.findall(r'[a-z]+|\d+', __version__)
 return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
@@ -1 +1 @@
-parso>=0.2.0
+parso>=0.3.0

@@ -1,2 +1,8 @@
 [bdist_wheel]
 universal=1

+[flake8]
+max-line-length = 100
+ignore =
+# do not use bare 'except'
+E722,
19  setup.py
@@ -3,7 +3,6 @@
 from setuptools import setup, find_packages

 import ast
-import sys

 __AUTHOR__ = 'David Halter'
 __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'
@@ -11,10 +10,7 @@ __AUTHOR_EMAIL__ = 'davidhalter88@gmail.com'
 # Get the version from within jedi. It's defined in exactly one place now.
 with open('jedi/__init__.py') as f:
 tree = ast.parse(f.read())
-if sys.version_info > (3, 7):
-version = tree.body[0].value.s
-else:
-version = tree.body[1].value.s
+version = tree.body[int(not hasattr(tree, 'docstring'))].value.s

 readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read()
 with open('requirements.txt') as f:
@@ -33,9 +29,17 @@ setup(name='jedi',
 keywords='python completion refactoring vim',
 long_description=readme,
 packages=find_packages(exclude=['test', 'test.*']),
-python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
+python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
 install_requires=install_requires,
-extras_require={'dev': ['docopt']},
+extras_require={
+'testing': [
+'pytest>=3.1.0',
+# docopt for sith doctests
+'docopt',
+# coloroma for colored debug output
+'colorama',
+],
+},
 package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
 platforms=['any'],
 classifiers=[
@@ -47,7 +51,6 @@ setup(name='jedi',
 'Programming Language :: Python :: 2',
 'Programming Language :: Python :: 2.7',
 'Programming Language :: Python :: 3',
-'Programming Language :: Python :: 3.3',
 'Programming Language :: Python :: 3.4',
 'Programming Language :: Python :: 3.5',
 'Programming Language :: Python :: 3.6',
@@ -29,6 +29,9 @@ b = [6,7]

 #? int()
 b[8-7]
+# Something unreasonable:
+#?
+b['']

 # -----------------
 # Slices
@@ -434,7 +437,7 @@ def test_func():
 #? int()
 tuple({1})[0]

-# python >= 3.3
+# python >= 3.4
 # -----------------
 # PEP 3132 Extended Iterable Unpacking (star unpacking)
 # -----------------
@@ -73,3 +73,12 @@ async def wrapper():
 asgen().__ane
 #? []
 asgen().mro


+# Normal completion (#1092)
+normal_var1 = 42

+async def foo():
+normal_var2 = False
+#? ['normal_var1', 'normal_var2']
+normal_var
@@ -22,6 +22,9 @@ a(0):.
 0x0
 #? ['and', 'or', 'if', 'is', 'in', 'not']
 1j
+x = None()
+#?
+x

 # -----------------
 # if/else/elif
@@ -151,6 +154,9 @@ def global_define():
 #? int()
 global_var_in_func

+#? ['global_var_in_func']
+global_var_in_f


 def funct1():
 # From issue #610
@@ -172,6 +178,7 @@ def init_global_var_predefined():
 #? int() None
 global_var_predefined


 # -----------------
 # within docstrs
 # -----------------
@@ -417,6 +417,9 @@ class PrivateVar():
 def __private_func(self):
 return 1

+#? int()
+__private_func()

 def wrap_private(self):
 return self.__private_func()
 #? []
@@ -425,6 +428,8 @@ PrivateVar().__var
 PrivateVar().__var
 #? []
 PrivateVar().__private_func
+#? []
+PrivateVar.__private_func
 #? int()
 PrivateVar().wrap_private()

@@ -52,12 +52,12 @@ left
 [a for a in {1:'x'}][0]

 # list comprehensions should also work in combination with functions
-def listen(arg):
+def _listen(arg):
 for x in arg:
 #? str()
 x

-listen(['' for x in [1]])
+_listen(['' for x in [1]])
 #?
 ([str for x in []])[0]

@@ -212,3 +212,14 @@ next(iter({a for a in range(10)}))

 #? int()
 [a for a in {1, 2, 3}][0]

+# -----------------
+# syntax errors
+# -----------------

+# Issue #1146

+#? ['list']
+[int(str(x.value) for x in list

+def reset_missing_bracket(): pass
@@ -30,13 +30,17 @@ def sphinxy(a, b, c, d, x):
 sphinxy()

 # wrong declarations
-def sphinxy2(a, b, x):
+def sphinxy2(a, b, x, y, z):
 """
 :param a: Forgot type declaration
 :type a:
 :param b: Just something
 :type b: ``
 :param x: Just something without type
+:param y: A function
+:type y: def l(): pass
+:param z: A keyword
+:type z: return
 :rtype:
 """
 #?
@@ -45,6 +49,10 @@ def sphinxy2(a, b, x):
 b
 #?
 x
+#?
+y
+#?
+z

 #?
 sphinxy2()
@@ -29,6 +29,10 @@ finally:
 x
 x = tuple

+if False:
+with open("") as defined_in_false:
+#? ['flush']
+defined_in_false.flu

 # -----------------
 # Return checks
@@ -25,3 +25,10 @@ Fr'sasdf'

 #? 7 str()
 Fr'''sasdf''' + ''

+#? ['upper']
+f'xyz'.uppe


+#? 3 []
+f'f'
@@ -319,6 +319,7 @@ exe['c']
 a = 'a'
 exe2 = kwargs_func(**{a:3,
 'b':4.0})

 #? int()
 exe2['a']
 #? float()
@@ -326,6 +327,19 @@ exe2['b']
 #? int() float()
 exe2['c']

+exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]})

+# Should resolve to the same as 2 but jedi is not smart enough yet
+# Here to make sure it doesn't result in crash though
+#?
+exe3['a']

+#?
+exe3['b']

+#?
+exe3['c']

 # -----------------
 # *args / ** kwargs
 # -----------------
@@ -208,11 +208,19 @@ def x():
 #? int()
 next(x())

+# -----------------
+# statements
+# -----------------
+def x():
+foo = yield
+#?
+foo

 # -----------------
 # yield from
 # -----------------

-# python >= 3.3
+# python >= 3.4

 def yield_from():
 yield from iter([1])
@@ -1,6 +1,6 @@
 """ Pep-0484 type hinting """

-# python >= 3.2
+# python >= 3.4


 class A():
@@ -129,11 +129,12 @@ class Key:
 class Value:
 pass

-def mapping(p, q, d, r, s, t):
+def mapping(p, q, d, dd, r, s, t):
 """
 :type p: typing.Mapping[Key, Value]
 :type q: typing.MutableMapping[Key, Value]
 :type d: typing.Dict[Key, Value]
+:type dd: typing.DefaultDict[Key, Value]
 :type r: typing.KeysView[Key]
 :type s: typing.ValuesView[Value]
 :type t: typing.ItemsView[Key, Value]
@@ -144,6 +145,8 @@ def mapping(p, q, d, r, s, t):
 q.setd
 #? ["setdefault"]
 d.setd
+#? ["setdefault"]
+dd.setd
 #? Value()
 p[1]
 for key in p:
@@ -240,7 +243,37 @@ for key in x.keys():
 for value in x.values():
 #? int()
 value
-# python >= 3.2
+# python >= 3.4

+class TestDefaultDict(typing.DefaultDict[str, int]):
+def setdud(self):
+pass

+def testdict(x):
+"""
+:type x: TestDefaultDict
+"""
+#? ["setdud", "setdefault"]
+x.setd
+for key in x.keys():
+#? str()
+key
+for value in x.values():
+#? int()
+value

+x = TestDefaultDict()
+#? ["setdud", "setdefault"]
+x.setd
+for key in x.keys():
+#? str()
+key
+for value in x.values():
+#? int()
+value
+# python >= 3.4


 """
 docstrings have some auto-import, annotations can use all of Python's
 import logic
@@ -76,3 +76,18 @@ class InstanceAttributeIfs:
 InstanceAttributeIfs().a1
 #? int() str()
 InstanceAttributeIfs().a2



+class A:
+def a(self, b):
+for x in [self.a(i) for i in b]:
+#?
+x

+class B:
+def a(self, b):
+for i in b:
+for i in self.a(i):
+#?
+yield i
@@ -91,6 +91,12 @@ d.items()[0][0]
 #? int()
 d.items()[0][1]

+(a, b), = {a:1 for a in [1.0]}.items()
+#? float()
+a
+#? int()
+b

 # -----------------
 # tuples
 # -----------------
@@ -125,3 +131,58 @@ set_t2 = set()

 #? ['clear', 'copy']
 set_t2.c

+# -----------------
+# pep 448 unpacking generalizations
+# -----------------
+# python >= 3.5

+d = {'a': 3}
+dc = {v: 3 for v in ['a']}

+#? dict()
+{**d}

+#? dict()
+{**dc}

+#? str()
+{**d, "b": "b"}["b"]

+#? str()
+{**dc, "b": "b"}["b"]

+# Should resolve to int() but jedi is not smart enough yet
+# Here to make sure it doesn't result in crash though
+#?
+{**d}["a"]

+# Should resolve to int() but jedi is not smart enough yet
+# Here to make sure it doesn't result in crash though
+#?
+{**dc}["a"]

+s = {1, 2, 3}

+#? set()
+{*s}

+#? set()
+{*s, 4, *s}

+s = {1, 2, 3}
+# Should resolve to int() but jedi is not smart enough yet
+# Here to make sure it doesn't result in crash though
+#?
+{*s}.pop()

+#? int()
+{*s, 4}.pop()

+# Should resolve to int() but jedi is not smart enough yet
+# Here to make sure it doesn't result in crash though
+#?
+[*s][0]

+#? int()
+[*s, 4][0]
@@ -126,11 +126,12 @@ class StaticAnalysisCase(object):
 return "<%s: %s>" % (self.__class__.__name__, os.path.basename(self._path))


-@pytest.fixture()
-def venv_path(tmpdir, environment):
+@pytest.fixture(scope='session')
+def venv_path(tmpdir_factory, environment):
 if environment.version_info.major < 3:
 pytest.skip("python -m venv does not exist in Python 2")

+tmpdir = tmpdir_factory.mktemp('venv_path')
 dirname = os.path.join(tmpdir.dirname, 'venv')

 # We cannot use the Python from tox because tox creates virtualenvs and
1  test/examples/namespace_package_relative_import/rel1.py  Normal file
@@ -0,0 +1 @@
+from .rel2 import name

1  test/examples/namespace_package_relative_import/rel2.py  Normal file
@@ -0,0 +1 @@
+name = 1
@@ -285,3 +285,11 @@ def test_backslash_continuation_and_bracket(Script):
 column = lines[-1].index('(')
 def_, = Script(code, line=len(lines), column=column).goto_definitions()
 assert def_.name == 'int'


+def test_goto_follow_builtin_imports(Script):
+s = Script('import sys; sys')
+d, = s.goto_assignments(follow_imports=True)
+assert d.in_builtin_module() is True
+d, = s.goto_assignments(follow_imports=True, follow_builtin_imports=True)
+assert d.in_builtin_module() is False
@@ -6,7 +6,7 @@ import pytest

 from ..helpers import TestCase
 from jedi import cache
-from jedi._compatibility import is_py33
+from jedi._compatibility import is_py3


 def assert_signature(Script, source, expected_name, expected_index=0, line=None, column=None):
@@ -247,7 +247,7 @@ def _params(Script, source, line=None, column=None):


 def test_param_name(Script):
-if not is_py33:
+if not is_py3:
 p = _params(Script, '''int(''')
 # int is defined as: `int(x[, base])`
 assert p[0].name == 'x'
@@ -2,6 +2,8 @@ import os
 import sys
 from textwrap import dedent

+import pytest
+

 def test_in_whitespace(Script):
     code = dedent('''
@@ -118,3 +120,23 @@ def test_generator(Script):
 def test_in_comment(Script):
     assert Script(" # Comment").completions()
     assert Script("max_attr_value = int(2) # Cast to int for spe").completions()
+
+
+def test_async(Script, environment):
+    if environment.version_info < (3, 5):
+        pytest.skip()
+
+    code = dedent('''
+    foo = 3
+    async def x():
+        hey = 3
+        ho'''
+    )
+    comps = Script(code, column=4).completions()
+    names = [c.name for c in comps]
+    assert 'foo' in names
+    assert 'hey' in names
+
+
+def test_with_stmt_error_recovery(Script):
+    assert Script('with open('') as foo: foo.\na', line=1).completions()
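Note: the new completion tests build their sources with textwrap.dedent so the snippet can stay indented inside the test function. A quick standalone reminder of how dedent behaves; the snippet below is only an illustration:

from textwrap import dedent

code = dedent('''
    foo = 3
    async def x():
        hey = 3
''')
# dedent strips the whitespace common to all non-blank lines (four spaces
# here), so the snippet parses as top-level code when handed to a parser.
print(code)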
@@ -1,12 +1,13 @@
 import os
-from contextlib import contextmanager
+import sys

 import pytest

 import jedi
 from jedi._compatibility import py_version
 from jedi.api.environment import get_default_environment, find_virtualenvs, \
-    InvalidPythonEnvironment, find_system_environments, get_system_environment
+    InvalidPythonEnvironment, find_system_environments, \
+    get_system_environment, create_environment, get_cached_default_environment


 def test_sys_path():
@@ -42,7 +43,7 @@ def test_versions(version):

 def test_load_module(evaluator):
     access_path = evaluator.compiled_subprocess.load_module(
-        name=u'math',
+        dotted_name=u'math',
         sys_path=evaluator.get_sys_path()
     )
     name, access_handle = access_path.accesses[0]
@@ -83,31 +84,66 @@ def test_killed_subprocess(evaluator, Script):
     assert def_.name == 'str'


-@contextmanager
-def set_environment_variable(name, value):
-    tmp = os.environ.get(name)
-    try:
-        os.environ[name] = value
-        yield
-    finally:
-        if tmp is None:
-            del os.environ[name]
-        else:
-            os.environ[name] = tmp
-
-
-def test_not_existing_virtualenv():
+def test_not_existing_virtualenv(monkeypatch):
     """Should not match the path that was given"""
     path = '/foo/bar/jedi_baz'
-    with set_environment_variable('VIRTUAL_ENV', path):
-        assert get_default_environment().executable != path
+    monkeypatch.setenv('VIRTUAL_ENV', path)
+    assert get_default_environment().executable != path


-def test_working_venv(venv_path):
-    with set_environment_variable('VIRTUAL_ENV', venv_path):
-        assert get_default_environment().path == venv_path
+def test_working_venv(venv_path, monkeypatch):
+    monkeypatch.setenv('VIRTUAL_ENV', venv_path)
+    assert get_default_environment().path == venv_path


 def test_scanning_venvs(venv_path):
     parent_dir = os.path.dirname(venv_path)
-    assert any(venv.path == venv_path for venv in find_virtualenvs([parent_dir]))
+    assert any(venv.path == venv_path
+               for venv in find_virtualenvs([parent_dir]))
+
+
+def test_create_environment_venv_path(venv_path):
+    environment = create_environment(venv_path)
+    assert environment.path == venv_path
+
+
+def test_create_environment_executable():
+    environment = create_environment(sys.executable)
+    assert environment.executable == sys.executable
+
+
+def test_get_default_environment_from_env_does_not_use_safe(tmpdir, monkeypatch):
+    fake_python = os.path.join(str(tmpdir), 'fake_python')
+    with open(fake_python, 'w') as f:
+        f.write('')
+
+    def _get_subprocess(self):
+        if self._start_executable != fake_python:
+            raise RuntimeError('Should not get called!')
+        self.executable = fake_python
+        self.path = 'fake'
+
+    monkeypatch.setattr('jedi.api.environment.Environment._get_subprocess',
+                        _get_subprocess)
+
+    monkeypatch.setenv('VIRTUAL_ENV', fake_python)
+    env = get_default_environment()
+    assert env.path == 'fake'
+
+
+@pytest.mark.parametrize('virtualenv', ['', 'fufuuuuu', sys.prefix])
+def test_get_default_environment_when_embedded(monkeypatch, virtualenv):
+    # When using Python embedded, sometimes the executable is not a Python
+    # executable.
+    executable_name = 'RANDOM_EXE'
+    monkeypatch.setattr(sys, 'executable', executable_name)
+    monkeypatch.setenv('VIRTUAL_ENV', virtualenv)
+    env = get_default_environment()
+    assert env.executable != executable_name
+
+
+def test_changing_venv(venv_path, monkeypatch):
+    monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', venv_path)
+    get_cached_default_environment()
+    monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', sys.executable)
+    assert get_cached_default_environment().executable == sys.executable
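Note: the hand-rolled set_environment_variable context manager is dropped in favour of pytest's built-in monkeypatch fixture, which undoes environment changes automatically at teardown. A generic sketch of the pattern; the variable value here is illustrative:

import os


def test_reads_virtual_env(monkeypatch):
    # The change is local to this test; pytest restores the previous value
    # (or removes the variable) when the test finishes, replacing the
    # try/finally bookkeeping the old context manager had to do by hand.
    monkeypatch.setenv('VIRTUAL_ENV', '/tmp/example_venv')
    assert os.environ['VIRTUAL_ENV'] == '/tmp/example_venv'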
@@ -341,3 +341,18 @@ def test_dir_magic_method():

     foo = [c for c in completions if c.name == 'foo'][0]
     assert foo._goto_definitions() == []
+
+
+def test_name_not_findable():
+    class X():
+        if 0:
+            NOT_FINDABLE
+
+        def hidden(self):
+            return
+
+        hidden.__name__ = 'NOT_FINDABLE'
+
+    setattr(X, 'NOT_FINDABLE', X.hidden)
+
+    assert jedi.Interpreter("X.NOT_FINDA", [locals()]).completions()
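Note: test_name_not_findable leans on jedi.Interpreter, which completes against live objects from the supplied namespaces rather than from static analysis alone. A small usage sketch of that API as it appears in this era of jedi; the variable below is just an example:

import jedi

numbers = [1, 2, 3]
# Interpreter takes source text plus a list of namespace dicts; completions
# are computed from the actual runtime objects, so list methods show up here.
completions = jedi.Interpreter('numbers.app', [locals()]).completions()
print([c.name for c in completions])  # expected to include 'append'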
@@ -72,3 +72,9 @@ def test_wrong_encoding(Script, tmpdir):

     c, = Script('import x; x.foo', sys_path=[tmpdir.strpath]).completions()
     assert c.name == 'foobar'
+
+
+def test_encoding_parameter(Script):
+    name = u('hö')
+    s = Script(name.encode('latin-1'), encoding='latin-1')
+    assert s._module_node.get_code() == name
@@ -1,3 +1,14 @@
 def test_import_usage(Script):
     s = Script("from .. import foo", line=1, column=18, path="foo.py")
     assert [usage.line for usage in s.usages()] == [1]
+
+
+def test_exclude_builtin_modules(Script):
+    def get(include):
+        return [(d.line, d.column) for d in Script(source, column=8).usages(include_builtins=include)]
+    source = '''import sys\nprint(sys.path)'''
+    places = get(include=True)
+    assert places == [(None, None), (1, 7), (2, 6)]
+
+    places = get(include=False)
+    assert places == [(1, 7), (2, 6)]
test/test_compatibility.py (new file)
@@ -0,0 +1,26 @@
+from collections import namedtuple
+from jedi._compatibility import highest_pickle_protocol
+
+
+def test_highest_pickle_protocol():
+    v = namedtuple('version', 'major, minor')
+    assert highest_pickle_protocol([v(2, 7), v(2, 7)]) == 2
+    assert highest_pickle_protocol([v(2, 7), v(3, 3)]) == 2
+    assert highest_pickle_protocol([v(2, 7), v(3, 4)]) == 2
+    assert highest_pickle_protocol([v(2, 7), v(3, 5)]) == 2
+    assert highest_pickle_protocol([v(2, 7), v(3, 6)]) == 2
+    assert highest_pickle_protocol([v(3, 3), v(2, 7)]) == 2
+    assert highest_pickle_protocol([v(3, 3), v(3, 3)]) == 3
+    assert highest_pickle_protocol([v(3, 3), v(3, 4)]) == 3
+    assert highest_pickle_protocol([v(3, 3), v(3, 5)]) == 3
+    assert highest_pickle_protocol([v(3, 3), v(3, 6)]) == 3
+    assert highest_pickle_protocol([v(3, 4), v(2, 7)]) == 2
+    assert highest_pickle_protocol([v(3, 4), v(3, 3)]) == 3
+    assert highest_pickle_protocol([v(3, 4), v(3, 4)]) == 4
+    assert highest_pickle_protocol([v(3, 4), v(3, 5)]) == 4
+    assert highest_pickle_protocol([v(3, 4), v(3, 6)]) == 4
+    assert highest_pickle_protocol([v(3, 6), v(2, 7)]) == 2
+    assert highest_pickle_protocol([v(3, 6), v(3, 3)]) == 3
+    assert highest_pickle_protocol([v(3, 6), v(3, 4)]) == 4
+    assert highest_pickle_protocol([v(3, 6), v(3, 5)]) == 4
+    assert highest_pickle_protocol([v(3, 6), v(3, 6)]) == 4
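Note: the new test pins down highest_pickle_protocol by enumeration. Reading the assertions, the function appears to return the highest pickle protocol that every listed interpreter understands. A hypothetical sketch that satisfies the table above; the real implementation in jedi._compatibility may well differ:

def highest_pickle_protocol(version_infos):
    # Protocol 2 is the ceiling for Python 2.7, protocol 3 for Python 3.3,
    # and protocol 4 for Python 3.4+; the usable protocol is the minimum
    # ceiling across all interpreters that have to exchange pickles.
    def ceiling(version):
        if version.major == 2:
            return 2
        if version.minor == 3:
            return 3
        return 4
    return min(ceiling(v) for v in version_infos)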
@@ -70,11 +70,7 @@ def test_method_completion(Script, environment):

         foo = Foo()
         foo.bar.__func__''')
-    if environment.version_info.major > 2:
-        result = []
-    else:
-        result = ['__func__']
-    assert [c.name for c in Script(code).completions()] == result
+    assert [c.name for c in Script(code).completions()] == ['__func__']


 def test_time_docstring(Script):
@@ -142,6 +142,16 @@ def test_docstring_keyword(Script):
     assert 'assert' in completions[0].docstring()


+def test_docstring_params_formatting(Script):
+    defs = Script("""
+    def func(param1,
+             param2,
+             param3):
+        pass
+    func""").goto_definitions()
+    assert defs[0].docstring() == 'func(param1, param2, param3)'
+
+
 # ---- Numpy Style Tests ---

 @pytest.mark.skipif(numpydoc_unavailable,
@@ -93,3 +93,13 @@ def test_namespace_package_in_multiple_directories_goto_definition(Script):
     script = Script(sys_path=sys_path, source=CODE)
     result = script.goto_definitions()
     assert len(result) == 1
+
+
+def test_namespace_name_autocompletion_full_name(Script):
+    CODE = 'from pk'
+    sys_path = [join(dirname(__file__), d)
+                for d in ['implicit_namespace_package/ns1', 'implicit_namespace_package/ns2']]
+
+    script = Script(sys_path=sys_path, source=CODE)
+    compl = script.completions()
+    assert set(c.full_name for c in compl) == set(['pkg'])
@@ -87,10 +87,10 @@ def test_import_not_in_sys_path(Script):
     ("from flask.ext.", "bar"),
     ("from flask.ext.", "baz"),
     ("from flask.ext.", "moo"),
-    pytest.mark.xfail(("import flask.ext.foo; flask.ext.foo.", "Foo")),
-    pytest.mark.xfail(("import flask.ext.bar; flask.ext.bar.", "Foo")),
-    pytest.mark.xfail(("import flask.ext.baz; flask.ext.baz.", "Foo")),
-    pytest.mark.xfail(("import flask.ext.moo; flask.ext.moo.", "Foo")),
+    pytest.param("import flask.ext.foo; flask.ext.foo.", "Foo", marks=pytest.mark.xfail),
+    pytest.param("import flask.ext.bar; flask.ext.bar.", "Foo", marks=pytest.mark.xfail),
+    pytest.param("import flask.ext.baz; flask.ext.baz.", "Foo", marks=pytest.mark.xfail),
+    pytest.param("import flask.ext.moo; flask.ext.moo.", "Foo", marks=pytest.mark.xfail),
 ])
 def test_flask_ext(Script, code, name):
     """flask.ext.foo is really imported from flaskext.foo or flask_foo.
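Note: wrapping a parameter tuple in pytest.mark.xfail(...) was deprecated and later removed; the modern spelling attaches the mark through pytest.param. A generic illustration with made-up values:

import pytest


@pytest.mark.parametrize('a, b, expected', [
    (1, 1, 2),
    (2, 3, 5),
    # marks= attaches xfail to this single parameter set only.
    pytest.param(2, 2, 5, marks=pytest.mark.xfail),
])
def test_addition(a, b, expected):
    assert a + b == expected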
@@ -1,6 +1,9 @@
 from os.path import dirname, join

 import pytest
+import py
+
+from ..helpers import get_example_dir


 SYS_PATH = [join(dirname(__file__), d)
@@ -72,3 +75,22 @@ def test_nested_namespace_package(Script):
     result = script.goto_definitions()

     assert len(result) == 1
+
+
+def test_relative_import(Script, environment, tmpdir):
+    """
+    Attempt a relative import in a very simple namespace package.
+    """
+    if environment.version_info < (3, 4):
+        pytest.skip()
+
+    directory = get_example_dir('namespace_package_relative_import')
+    # Need to copy the content in a directory where there's no __init__.py.
+    py.path.local(directory).copy(tmpdir)
+    file_path = join(tmpdir.strpath, "rel1.py")
+    script = Script(path=file_path, line=1)
+    d, = script.goto_definitions()
+    assert d.name == 'int'
+    d, = script.goto_assignments()
+    assert d.name == 'name'
+    assert d.module_name == 'rel2'
@@ -10,8 +10,10 @@ import os
 import shutil
 import sys

+import pytest
+
 import jedi
-from ..helpers import cwd_at
+from jedi.api.environment import SameEnvironment


 SRC = """class Foo:
@@ -22,35 +24,44 @@ class Bar:
 """


-def generate_pyc():
-    os.mkdir("dummy_package")
-    with open("dummy_package/__init__.py", 'w'):
+@pytest.fixture
+def pyc_project_path(tmpdir):
+    path = tmpdir.strpath
+    dummy_package_path = os.path.join(path, "dummy_package")
+    os.mkdir(dummy_package_path)
+    with open(os.path.join(dummy_package_path, "__init__.py"), 'w'):
         pass
-    with open("dummy_package/dummy.py", 'w') as f:
+
+    dummy_path = os.path.join(dummy_package_path, 'dummy.py')
+    with open(dummy_path, 'w') as f:
         f.write(SRC)
     import compileall
-    compileall.compile_file("dummy_package/dummy.py")
-    os.remove("dummy_package/dummy.py")
+    compileall.compile_file(dummy_path)
+    os.remove(dummy_path)

-    if sys.version_info[0] == 3:
+    if sys.version_info.major == 3:
         # Python3 specific:
         # To import pyc modules, we must move them out of the __pycache__
         # directory and rename them to remove ".cpython-%s%d"
         # see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files
-        for f in os.listdir("dummy_package/__pycache__"):
+        pycache = os.path.join(dummy_package_path, "__pycache__")
+        for f in os.listdir(pycache):
             dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "")
-            dst = os.path.join("dummy_package", dst)
-            shutil.copy(os.path.join("dummy_package/__pycache__", f), dst)
+            dst = os.path.join(dummy_package_path, dst)
+            shutil.copy(os.path.join(pycache, f), dst)
+    try:
+        yield path
+    finally:
+        shutil.rmtree(path)


-@cwd_at('test/test_evaluate')
-def test_pyc(Script):
+def test_pyc(pyc_project_path):
     """
     The list of completion must be greater than 2.
     """
-    try:
-        generate_pyc()
-        s = jedi.Script("from dummy_package import dummy; dummy.", path='blub.py')
-        assert len(s.completions()) >= 2
-    finally:
-        shutil.rmtree("dummy_package")
+    path = os.path.join(pyc_project_path, 'blub.py')
+    s = jedi.Script(
+        "from dummy_package import dummy; dummy.",
+        path=path,
+        environment=SameEnvironment())
+    assert len(s.completions()) >= 2
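Note: the fixture byte-compiles dummy.py with compileall and then moves the .pyc out of __pycache__ so it is importable without the source. For reference, a self-contained sketch of the two single-file byte-compilation APIs in the standard library; file names here are illustrative:

import compileall
import py_compile

with open('dummy.py', 'w') as f:
    f.write('x = 1\n')

# compileall.compile_file() writes __pycache__/dummy.cpython-XY.pyc on
# Python 3, which is why the fixture has to rename the result afterwards.
compileall.compile_file('dummy.py')

# py_compile.compile() accepts an explicit output path; once the .py source
# is removed, Python 3 can import the sourceless dummy.pyc directly.
py_compile.compile('dummy.py', cfile='dummy.pyc')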
@@ -29,10 +29,8 @@ def test_paths_from_assignment(Script):
 def test_venv_and_pths(venv_path):
     pjoin = os.path.join

-    virtualenv = create_environment(venv_path)
-
     CUR_DIR = os.path.dirname(__file__)
-    site_pkg_path = pjoin(virtualenv.path, 'lib')
+    site_pkg_path = pjoin(venv_path, 'lib')
     if os.name == 'nt':
         site_pkg_path = pjoin(site_pkg_path, 'site-packages')
     else:
@@ -40,6 +38,7 @@ def test_venv_and_pths(venv_path):
     shutil.rmtree(site_pkg_path)
     shutil.copytree(pjoin(CUR_DIR, 'sample_venvs', 'pth_directory'), site_pkg_path)

+    virtualenv = create_environment(venv_path)
     venv_paths = virtualenv.get_sys_path()

     ETALON = [
@@ -75,7 +75,8 @@ def test_hex_values_in_docstring():

 @pytest.mark.parametrize(
     'code,call_signature', [
-        ('def my_function(x, y, z) -> str:\n return', 'my_function(x, y, z)'),
+        ('def my_function(x, typed: Type, z):\n return', 'my_function(x, typed: Type, z)'),
+        ('def my_function(x, y, z) -> str:\n return', 'my_function(x, y, z) -> str'),
         ('lambda x, y, z: x + y * z\n', '<lambda>(x, y, z)')
     ])
 def test_get_call_signature(code, call_signature):
@@ -84,4 +85,4 @@ def test_get_call_signature(code, call_signature):
     node = node.children[0]
     assert parser_utils.get_call_signature(node) == call_signature

-    assert parser_utils.get_doc_with_call_signature(node) == (call_signature + '\n\n')
+    assert parser_utils.get_doc_with_call_signature(node) == call_signature
test/test_settings.py (new file)
@@ -0,0 +1,19 @@
+import pytest
+
+from jedi import settings
+from jedi.evaluate.compiled import CompiledContextName
+
+
+@pytest.fixture()
+def auto_import_json(monkeypatch):
+    monkeypatch.setattr(settings, 'auto_import_modules', ['json'])
+
+
+def test_base_auto_import_modules(auto_import_json, Script):
+    loads, = Script('import json; json.loads').goto_definitions()
+    assert isinstance(loads._name, CompiledContextName)
+
+
+def test_auto_import_modules_imports(auto_import_json, Script):
+    main, = Script('from json import tool; tool.main').goto_definitions()
+    assert isinstance(main._name, CompiledContextName)
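Note: the new settings tests patch jedi.settings through monkeypatch.setattr, so the module-level option is restored after each test. A generic sketch of the same pattern; the option toggled here, case_insensitive_completion, is just an example of a module attribute:

import pytest

from jedi import settings


@pytest.fixture()
def case_sensitive(monkeypatch):
    # setattr swaps the module attribute for the duration of one test and
    # puts the original value back afterwards.
    monkeypatch.setattr(settings, 'case_insensitive_completion', False)


def test_setting_is_patched(case_sensitive):
    assert settings.case_insensitive_completion is False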
tox.ini (30 changed lines)
@@ -1,14 +1,10 @@
 [tox]
-envlist = py27, py33, py34, py35, py36
+envlist = py27, py34, py35, py36
 [testenv]
-deps =
-    pytest>=2.3.5, < 3.3
-    pytest-cache
-    # docopt for sith doctests
-    docopt
-    # coloroma for colored debug output
-    colorama
-    -rrequirements.txt
+extras = testing
+# Overwrite the parso version (only used sometimes).
+# deps =
+#     git+https://github.com/davidhalter/parso.git
 passenv = JEDI_TEST_ENVIRONMENT
 setenv =
     # https://github.com/tomchristie/django-rest-framework/issues/1957
@@ -17,46 +13,34 @@ setenv =
     # To test Jedi in different versions than the same Python version, set a
    # different test environment.
     env27: JEDI_TEST_ENVIRONMENT=27
-    env33: JEDI_TEST_ENVIRONMENT=33
     env34: JEDI_TEST_ENVIRONMENT=34
     env35: JEDI_TEST_ENVIRONMENT=35
     env36: JEDI_TEST_ENVIRONMENT=36
     env37: JEDI_TEST_ENVIRONMENT=37
 commands =
-    # Overwrite the parso version (only used sometimes).
-    # pip install git+https://github.com/davidhalter/parso.git
-    py.test {posargs:jedi test}
+    pytest {posargs:jedi test}
 [testenv:py27]
 deps =
     # for testing the typing module
     typing
     # numpydoc for typing scipy stack
     numpydoc
-    {[testenv]deps}
-[testenv:py33]
-deps =
-    typing
-    {[testenv]deps}
 [testenv:py34]
 deps =
     typing
     numpydoc
-    {[testenv]deps}
 [testenv:py35]
 deps =
     numpydoc
-    {[testenv]deps}
 [testenv:py36]
 deps =
     numpydoc
-    {[testenv]deps}
 [testenv:cov]
 deps =
     coverage
     numpydoc
-    {[testenv]deps}
 commands =
-    coverage run --source jedi -m py.test
+    coverage run --source jedi -m pytest
     coverage report
 [testenv:sith]
 commands =
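Note: replacing the hard-coded deps list with extras = testing means tox now installs the package with its own "testing" extra (effectively pip install .[testing]), so test dependencies are declared once in the packaging metadata. A hypothetical setup.py excerpt showing the shape such an extra takes; the actual dependency list in jedi's setup.py may differ:

from setuptools import setup

setup(
    name='jedi',
    # ... other metadata elided ...
    extras_require={
        # `extras = testing` in tox.ini asks pip to install this group.
        'testing': [
            'pytest>=3.1.0',
            'docopt',
            'colorama',
        ],
    },
)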
@@ -8,10 +8,6 @@ set -e
 sudo chown root: /opt/python/3.6/bin/python
 sudo chown root: /opt/python/3.6.3/bin/python

-if [[ $JEDI_TEST_ENVIRONMENT == "33" ]]; then
-    VERSION=3.3
-    DOWNLOAD=1
-fi
 if [[ $JEDI_TEST_ENVIRONMENT == "35" ]]; then
     VERSION=3.5
     DOWNLOAD=1