Mirror of https://github.com/davidhalter/jedi.git (synced 2025-12-06 22:14:27 +08:00)

Commit: Merge branch 'master' of github.com:davidhalter/jedi
This commit is contained in:
@@ -4,7 +4,6 @@ omit =
    jedi/inference/compiled/subprocess/__main__.py
    jedi/__main__.py
    # For now this is not being used.
    jedi/refactoring.py

[report]
# Regexes for lines to exclude from consideration
.gitignore (vendored, 1 changed line)

@@ -13,3 +13,4 @@ jedi.egg-info/
record.json
/.cache/
/.pytest_cache
/venv/
.travis.yml (21 changed lines)

@@ -1,20 +1,18 @@
dist: xenial
language: python
python:
 - 2.7
 - 3.4
 - 3.5
 - 3.6
 - 3.7
 - 3.8
 - 3.7
 - 3.6
 - 3.5
 - 2.7

env:
 - JEDI_TEST_ENVIRONMENT=27
 - JEDI_TEST_ENVIRONMENT=34
 - JEDI_TEST_ENVIRONMENT=35
 - JEDI_TEST_ENVIRONMENT=36
 - JEDI_TEST_ENVIRONMENT=37
 - JEDI_TEST_ENVIRONMENT=38
 - JEDI_TEST_ENVIRONMENT=37
 - JEDI_TEST_ENVIRONMENT=36
 - JEDI_TEST_ENVIRONMENT=35
 - JEDI_TEST_ENVIRONMENT=27
 - JEDI_TEST_ENVIRONMENT=interpreter

matrix:

@@ -42,7 +40,8 @@ script:
  python_bin=python$test_env_version
  python_path="$(which $python_bin || true)"
  if [ -z "$python_path" ]; then
      # Only required for JEDI_TEST_ENVIRONMENT=34.
      # Only required for JEDI_TEST_ENVIRONMENT=38, because it's not always
      # available.
      download_name=python-$test_env_version
      wget https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/16.04/x86_64/$download_name.tar.bz2
      sudo tar xjf $download_name.tar.bz2 --directory / opt/python
AUTHORS.txt (108 changed lines)

@@ -1,59 +1,63 @@
(The underlines of both headings change from "=" to "-", every name becomes a bullet
item, and two contributors, Lior Goldberg and Ryan Clary, are appended. The resulting
file:)

Main Authors
------------

- David Halter (@davidhalter) <davidhalter88@gmail.com>
- Takafumi Arakaki (@tkf) <aka.tkf@gmail.com>

Code Contributors
-----------------

- Danilo Bargen (@dbrgn) <mail@dbrgn.ch>
- Laurens Van Houtven (@lvh) <_@lvh.cc>
- Aldo Stracquadanio (@Astrac) <aldo.strac@gmail.com>
- Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
- tek (@tek)
- Yasha Borevich (@jjay) <j.borevich@gmail.com>
- Aaron Griffin <aaronmgriffin@gmail.com>
- andviro (@andviro)
- Mike Gilbert (@floppym) <floppym@gentoo.org>
- Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
- Lubos Trilety <ltrilety@redhat.com>
- Akinori Hattori (@hattya) <hattya@gmail.com>
- srusskih (@srusskih)
- Steven Silvester (@blink1073)
- Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
- Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
- Fredrik Bergroth (@fbergroth)
- Mathias Fußenegger (@mfussenegger)
- Syohei Yoshida (@syohex) <syohex@gmail.com>
- ppalucky (@ppalucky)
- immerrr (@immerrr) immerrr@gmail.com
- Albertas Agejevas (@alga)
- Savor d'Isavano (@KenetJervet) <newelevenken@163.com>
- Phillip Berndt (@phillipberndt) <phillip.berndt@gmail.com>
- Ian Lee (@IanLee1521) <IanLee1521@gmail.com>
- Farkhad Khatamov (@hatamov) <comsgn@gmail.com>
- Kevin Kelley (@kelleyk) <kelleyk@kelleyk.net>
- Sid Shanker (@squidarth) <sid.p.shanker@gmail.com>
- Reinoud Elhorst (@reinhrst)
- Guido van Rossum (@gvanrossum) <guido@python.org>
- Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
- Cristi Burcă (@scribu)
- bstaint (@bstaint)
- Mathias Rav (@Mortal) <rav@cs.au.dk>
- Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
- Simon Ruggier (@sruggier)
- Élie Gouzien (@ElieGouzien)
- Robin Roth (@robinro)
- Malte Plath (@langsamer)
- Anton Zub (@zabulazza)
- Maksim Novikov (@m-novikov) <mnovikov.work@gmail.com>
- Tobias Rzepka (@TobiasRzepka)
- micbou (@micbou)
- Dima Gerasimov (@karlicoss) <karlicoss@gmail.com>
- Max Woerner Chase (@mwchase) <max.chase@gmail.com>
- Johannes Maria Frank (@jmfrank63) <jmfrank63@gmail.com>
- Shane Steinert-Threlkeld (@shanest) <ssshanest@gmail.com>
- Tim Gates (@timgates42) <tim.gates@iress.com>
- Lior Goldberg (@goldberglior)
- Ryan Clary (@mrclary)

And a few more "anonymous" contributors.

Note: (@user) means a github user name.
@@ -3,20 +3,46 @@
Changelog
---------

0.16.0 (2020--)
0.17.0 (2020-04-14)
+++++++++++++++++++

- Added ``Project`` support. This allows a user to specify which folders Jedi
  should work with.
- Added support for Refactoring. The following refactorings have been
  implemented: ``Script.rename``, ``Script.inline``,
  ``Script.extract_variable`` and ``Script.extract_function``.
- Added ``Script.get_syntax_errors`` to display syntax errors in the current
  script.
- Added code search capabilities both for individual files and projects. The
  new functions are ``Project.search``, ``Project.complete_search``,
  ``Script.search`` and ``Script.complete_search``.
- Added ``Script.help`` to make it easier to display a help window to people.
  Now returns pydoc information as well for Python keywords/operators. This
  means that on the class keyword it will now return the docstring of Python's
  builtin function ``help('class')``.
- The API documentation is now way more readable and complete. Check it out
  under https://jedi.readthedocs.io. A lot of it has been rewritten.
- Removed Python 3.4 support
- Many bugfixes

This is likely going to be the last minor version that supports Python 2 and
Python3.5. Bugfixes will be provided in 0.17.1+. The next minor/major version
will probably be Jedi 1.0.0.
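A rough sketch of how these 0.17.0 additions fit together (the project path and code
below are invented, and the calls shown are only a subset)::

    import jedi

    project = jedi.Project('/path/to/repo')
    source = "import json\njson.loads('{}')\n"
    script = jedi.Script(source, path='example.py', project=project)

    script.get_syntax_errors()     # -> [] for syntactically valid code
    script.help(1, 8)              # -> pydoc-style documentation for ``json``
    script.complete(2, 7)          # -> completions such as ``loads``
    list(project.search('json'))   # -> names matching ``json`` across the project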
0.16.0 (2020-01-26)
+++++++++++++++++++

- **Added** ``Script.get_context`` to get information where you currently are.
- Goto on a function/attribute in a class now goes to the definition in its
  super class.
- Completions/type inference of **Pytest fixtures**.
- Tensorflow, Numpy and Pandas completions should now be about **4-10x faster**
  after the first time they are used.
- Dict key completions are working now. e.g. ``d = {1000: 3}; d[10`` will
  expand to ``1000``.
- Completion for "proxies" works now. These are classes that have a
  ``__getattr__(self, name)`` method that does a ``return getattr(x, name)``.
- Understanding of Pytest fixtures.
- Tensorflow, Numpy and Pandas completions should now be about 4-10x faster
  after loading them initially.
- Goto on a function/attribute in a class now goes to the definition in its
  super class.
- Big **Script API Changes**:
- The line and column parameters of ``jedi.Script`` are now deprecated
- ``completions`` deprecated, use ``complete`` instead
@@ -25,9 +51,18 @@ Changelog
- ``call_signatures`` deprecated, use ``get_signatures`` instead
- ``usages`` deprecated, use ``get_references`` instead
- ``jedi.names`` deprecated, use ``jedi.Script(...).get_names()``
- ``BaseDefinition.goto_assignments`` renamed to ``BaseDefinition.goto``
- Python 2 support deprecated. For this release it is best effort. Python 2 has
  reached the end of its life and now it's just about a smooth transition.
- ``BaseName.goto_assignments`` renamed to ``BaseName.goto``
- Add follow_imports to ``Name.goto``. Now its signature matches
  ``Script.goto``.
- **Python 2 support deprecated**. For this release it is best effort. Python 2
  has reached the end of its life and now it's just about a smooth transition.
  Bugs for Python 2 will not be fixed anymore and a third of the tests are
  already skipped.
- Removed ``settings.no_completion_duplicates``. It wasn't tested and nobody
  was probably using it anyway.
- Removed ``settings.use_filesystem_cache`` and
  ``settings.additional_dynamic_modules``, they have no usage anymore. Pretty
  much nobody was probably using them.

0.15.2 (2019-12-20)
+++++++++++++++++++

@@ -57,13 +92,13 @@ Changelog

New APIs:

- ``Definition.get_signatures() -> List[Signature]``. Signatures are similar to
  ``CallSignature``. ``Definition.params`` is therefore deprecated.
- ``Name.get_signatures() -> List[Signature]``. Signatures are similar to
  ``CallSignature``. ``Name.params`` is therefore deprecated.
- ``Signature.to_string()`` to format signatures.
- ``Signature.params -> List[ParamDefinition]``, ParamDefinition has the
- ``Signature.params -> List[ParamName]``, ParamName has the
  following additional attributes ``infer_default()``, ``infer_annotation()``,
  ``to_string()``, and ``kind``.
- ``Definition.execute() -> List[Definition]``, makes it possible to infer
- ``Name.execute() -> List[Name]``, makes it possible to infer
  return values of functions.


@@ -79,7 +114,7 @@ New APIs:
- Added ``goto_*(prefer_stubs=True)`` as well as ``goto_*(prefer_stubs=True)``
- Stubs are used now for type inference
- Typeshed is used for better type inference
- Reworked Definition.full_name, should have more correct return values
- Reworked Name.full_name, should have more correct return values

0.13.3 (2019-02-24)
+++++++++++++++++++

@@ -159,7 +194,7 @@ New APIs:
- Actual semantic completions for the complete Python syntax.
- Basic type inference for ``yield from`` PEP 380.
- PEP 484 support (most of the important features of it). Thanks Claude! (@reinhrst)
- Added ``get_line_code`` to ``Definition`` and ``Completion`` objects.
- Added ``get_line_code`` to ``Name`` and ``Completion`` objects.
- Completely rewritten the type inference engine.
- A new and better parser for (fast) parsing diffs of Python code.
README.rst (171 changed lines)

@@ -1,14 +1,14 @@
###################################################################
Jedi - an awesome autocompletion/static analysis library for Python
###################################################################
####################################################################################
Jedi - an awesome autocompletion, static analysis and refactoring library for Python
####################################################################################

.. image:: https://img.shields.io/pypi/v/jedi.svg?style=flat
   :target: https://pypi.python.org/pypi/jedi
   :alt: PyPI version
.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg
   :target: https://github.com/davidhalter/jedi/issues
   :alt: The percentage of open issues and pull requests

.. image:: https://img.shields.io/pypi/pyversions/jedi.svg
   :target: https://pypi.python.org/pypi/jedi
   :alt: Supported Python versions
.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg
   :target: https://github.com/davidhalter/jedi/issues
   :alt: The resolution time is the median time an issue or pull request stays open.

.. image:: https://travis-ci.org/davidhalter/jedi.svg?branch=master
   :target: https://travis-ci.org/davidhalter/jedi

@@ -23,34 +23,27 @@ Jedi - an awesome autocompletion/static analysis library for Python
   :alt: Coverage status


*If you have specific questions, please add an issue or ask on* `Stack Overflow
<https://stackoverflow.com/questions/tagged/python-jedi>`_ *with the label* ``python-jedi``.
Jedi is a static analysis tool for Python that is typically used in
IDEs/editors plugins. Jedi has a focus on autocompletion and goto
functionality. Other features include refactoring, code search and finding
references.


Jedi is a static analysis tool for Python that can be used in IDEs/editors.
Jedi has a focus on autocompletion and goto functionality. Jedi is fast and is
very well tested. It understands Python and stubs on a deep level.

Jedi has support for different goto functions. It's possible to search for
references and list names in a Python file to get information about them.

Jedi uses a very simple API to connect with IDE's. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
Autocompletion in your REPL is also possible, IPython uses it natively and for
the CPython REPL you have to install it.
Jedi has a simple API to work with. There is a reference implementation as a
`VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_. Autocompletion in your
REPL is also possible, IPython uses it natively and for the CPython REPL you
can install it. Jedi is well tested and bugs should be rare.

Jedi can currently be used with the following editors/projects:

- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_)
- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
- TextMate_ (Not sure if it's actually working)
- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`proof
- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`see
  <https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
- Atom_ (autocomplete-python-jedi_)
- `GNOME Builder`_ (with support for GObject Introspection)
- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_)
- Gedit (gedi_)
- wdb_ - Web Debugger
- `Eric IDE`_ (Available as a plugin)

@@ -58,60 +51,52 @@ Jedi can currently be used with the following editors/projects:

and many more!


Here are some pictures taken from jedi-vim_:

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png

Completion for almost anything (Ctrl+Space).
Completion for almost anything:

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png

Display of function/class bodies, docstrings.
Documentation:

.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png

Pydoc support (Shift+k).

There is also support for goto and renaming.

Get the latest version from `github <https://github.com/davidhalter/jedi>`_
(master branch should always be kind of stable/working).

Docs are available at `https://jedi.readthedocs.org/en/latest/
<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with documentation
enhancements and/or fixes are awesome and most welcome. Jedi uses `semantic
versioning <https://semver.org/>`_.
<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with enhancements
and/or fixes are awesome and most welcome. Jedi uses `semantic versioning
<https://semver.org/>`_.

If you want to stay up-to-date (News / RFCs), please subscribe to this `github
thread <https://github.com/davidhalter/jedi/issues/1063>`_.:

Issues & Questions
==================

You can file issues and questions in the `issue tracker
<https://github.com/davidhalter/jedi/>`. Alternatively you can also ask on
`Stack Overflow <https://stackoverflow.com/questions/tagged/python-jedi>`_ with
the label ``python-jedi``.

Installation
============

    pip install jedi

`Check out the docs <https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.

Note: This just installs the Jedi library, not the editor plugins. For
information about how to make it work with your editor, refer to the
corresponding documentation.
Features and Limitations
========================

You don't want to use ``pip``? Please refer to the `manual
<https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.
Jedi's features are listed here:
`Features <https://jedi.readthedocs.org/en/latest/docs/features.html>`_.


Feature Support and Caveats
===========================

Jedi really understands your Python code. For a comprehensive list what Jedi
understands, see: `Features
<https://jedi.readthedocs.org/en/latest/docs/features.html>`_. A list of
caveats can be found on the same page.

You can run Jedi on CPython 2.7 or 3.4+ but it should also
understand/parse code older than those versions. Additionally you should be able
to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
You can run Jedi on CPython 2.7 or 3.5+ but it should also
understand code that is older than those versions. Additionally you should be
able to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
very well.

Tips on how to use Jedi efficiently can be found `here

@@ -120,47 +105,62 @@ Tips on how to use Jedi efficiently can be found `here
API
---

You can find the documentation for the `API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.
You can find a comprehensive documentation for the
`API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.

Autocompletion / Goto / Documentation
-------------------------------------

Autocompletion / Goto / Pydoc
-----------------------------

Please check the API for a good explanation. There are the following commands:
There are the following commands:

- ``jedi.Script.goto``
- ``jedi.Script.infer``
- ``jedi.Script.help``
- ``jedi.Script.complete``
- ``jedi.Script.get_references``
- ``jedi.Script.get_signatures``
- ``jedi.Script.get_context``

The returned objects are very powerful and really all you might need.

The returned objects are very powerful and are really all you might need.
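A minimal sketch of how these calls are commonly used (the source string and the
positions below are invented for illustration; the comments only indicate the kind
of result)::

    import jedi

    source = "import json\njson.lo"
    script = jedi.Script(source, path='example.py')

    script.complete(2, 7)        # e.g. [<Completion: load>, <Completion: loads>, ...]
    script.infer(1, 7)           # e.g. [<Name: module json>]
    script.help(1, 0)            # pydoc-style help for the ``import`` keyword
    script.get_references(1, 7)  # every place ``json`` is used in this file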
Autocompletion in your REPL (IPython, etc.)
-------------------------------------------

Starting with IPython `6.0.0` Jedi is a dependency of IPython. Autocompletion
in IPython is therefore possible without additional configuration.
Jedi is a dependency of IPython. Autocompletion in IPython with Jedi is
therefore possible without additional configuration.

It's possible to have Jedi autocompletion in REPL modes - `example video <https://vimeo.com/122332037>`_.
This means that in Python you can enable tab completion in a `REPL
Here is an `example video <https://vimeo.com/122332037>`_ of how REPL completion
can look.
For the ``python`` shell you can enable tab completion in a `REPL
<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.
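For the plain CPython shell, the linked guide boils down to roughly the following
snippet, usually placed in the file pointed to by ``$PYTHONSTARTUP`` (a sketch, not a
verbatim copy of the documentation)::

    try:
        from jedi.utils import setup_readline
        setup_readline()                      # Jedi-powered tab completion
    except ImportError:
        # Fall back to the standard library completer if Jedi is missing.
        import readline
        import rlcompleter
        readline.parse_and_bind("tab: complete")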
Static Analysis
------------------------
---------------

To do all forms of static analysis, please try to use
``jedi.Script(...).get_names``. It will return a list of names that you can use
to infer types and so on.
For a lot of forms of static analysis, you can try to use
``jedi.Script(...).get_names``. It will return a list of names that you can
then filter and work with. There is also a way to list the syntax errors in a
file: ``jedi.Script.get_syntax_errors``.
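A small sketch of that workflow (the analyzed source string is made up)::

    import jedi

    source = "def foo(x):\n    return x\n\nbar = foo(1"
    script = jedi.Script(source)

    for name in script.get_names(all_scopes=True):
        print(name.type, name.name, name.line)    # e.g. "function foo 1"

    for error in script.get_syntax_errors():
        print(error.line, error.column)           # the unclosed call on the last line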
Refactoring
-----------

Jedi's parser would support refactoring, but there's no API to use it right
now. If you're interested in helping out here, let me know. With the latest
parser changes, it should be very easy to actually make it work.
Jedi supports the following refactorings:

- ``jedi.Script.inline``
- ``jedi.Script.rename``
- ``jedi.Script.extract_function``
- ``jedi.Script.extract_variable``
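For example, a rename looks roughly like this (positions and names are invented;
``get_diff()`` previews the change, ``apply()`` would write it to disk)::

    import jedi

    script = jedi.Script("def add(a, b):\n    return a + b\n\nadd(1, 2)\n",
                         path='example.py')
    refactoring = script.rename(1, 4, new_name='plus')   # cursor on ``add``
    print(refactoring.get_diff())                        # unified-diff preview
    # refactoring.apply()                                # would modify the file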
Code Search
-----------

There is support for module search with ``jedi.Script.search``, and project
search for ``jedi.Project.search``. The way to search is either by providing a
name like ``foo`` or by using dotted syntax like ``foo.bar``. Additionally you
can provide the API type like ``class foo.bar.Bar``. There are also the
functions ``jedi.Script.complete_search`` and ``jedi.Project.complete_search``.
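Roughly, and with an invented project path::

    import jedi

    project = jedi.Project('/path/to/project')
    for name in project.search('class foo.bar.Bar'):
        print(name.module_path, name.line)

    script = jedi.Script("import os\n", path='module.py', project=project)
    for name in script.search('os.path.join'):
        print(name.description)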
Development
===========

@@ -168,39 +168,26 @@ Development
There's a pretty good and extensive `development documentation
<https://jedi.readthedocs.org/en/latest/docs/development.html>`_.


Testing
=======

The test suite depends on ``tox`` and ``pytest``::
The test suite uses ``pytest``::

    pip install tox pytest
    pip install pytest

To run the tests for all supported Python versions::
If you want to test only a specific Python version (e.g. Python 3.8), it is as
easy as::

    tox

If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as ::

    tox -e py27

Tests are also run automatically on `Travis CI
<https://travis-ci.org/davidhalter/jedi/>`_.
    python3.8 -m pytest

For more detailed information visit the `testing documentation
<https://jedi.readthedocs.org/en/latest/docs/testing.html>`_.


Acknowledgements
================

- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
  other things.
- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
  (originally used in lib2to3).

Thanks a lot to all the
`contributors <https://jedi.readthedocs.org/en/latest/docs/acknowledgements.html>`_!


.. _jedi-vim: https://github.com/davidhalter/jedi-vim
appveyor.yml (90 changed lines)

@@ -1,68 +1,56 @@
environment:
  matrix:
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 27
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 34
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 37

    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 27
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 34
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 37
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 27

    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 27
    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 34
    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 35
      JEDI_TEST_ENVIRONMENT: 37
    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 37
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py36
      PYTHON_PATH: C:\Python36
      JEDI_TEST_ENVIRONMENT: 27

    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 27
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 34
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py37
      PYTHON_PATH: C:\Python37
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 37
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py35
      PYTHON_PATH: C:\Python35
      JEDI_TEST_ENVIRONMENT: 27

    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 37
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 36
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 35
    - TOXENV: py27
      PYTHON_PATH: C:\Python27
      JEDI_TEST_ENVIRONMENT: 27
install:
  - git submodule update --init --recursive
  - set PATH=%PYTHON_PATH%;%PYTHON_PATH%\Scripts;%PATH%
conftest.py (16 changed lines)

@@ -1,6 +1,7 @@
import tempfile
import shutil
import os
import sys
from functools import partial

import pytest

@@ -8,6 +9,7 @@ import pytest
import jedi
from jedi.api.environment import get_system_environment, InterpreterEnvironment
from jedi._compatibility import py_version
from test.helpers import test_dir

collect_ignore = [
    'setup.py',
@@ -16,6 +18,9 @@ collect_ignore = [
    'build/',
    'test/examples',
]
if sys.version_info < (3, 6):
    # Python 2 not supported syntax
    collect_ignore.append('test/test_inference/test_mixed.py')


# The following hooks (pytest_configure, pytest_unconfigure) are used

@@ -105,6 +110,12 @@ def Script(environment):
    return partial(jedi.Script, environment=environment)


@pytest.fixture(scope='session')
def ScriptWithProject(Script):
    project = jedi.Project(test_dir)
    return partial(jedi.Script, project=project)


@pytest.fixture(scope='session')
def get_names(Script):
    return lambda code, **kwargs: Script(code).get_names(**kwargs)

@@ -120,6 +131,11 @@ def goto_or_help(request, Script):
    return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs)


@pytest.fixture(scope='session', params=['goto', 'help', 'infer'])
def goto_or_help_or_infer(request, Script):
    return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs)


@pytest.fixture(scope='session')
def has_typing(environment):
    if environment.version_info >= (3, 5, 0):
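A hedged illustration of how the new ``ScriptWithProject`` fixture might be used in a
test; the test below is invented and not part of the repository::

    def test_complete_inside_project(ScriptWithProject):
        # ScriptWithProject builds jedi.Script objects bound to the test project.
        script = ScriptWithProject('import jedi\njedi.Scr')
        assert any(c.name == 'Script' for c in script.complete())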
docs/_static/custom_style.css (vendored, new file, 9 lines)

@@ -0,0 +1,9 @@
div.version {
    color: black !important;
    margin-top: -1.2em !important;
    margin-bottom: .6em !important;
}

div.wy-side-nav-search {
    padding-top: 0 !important;
}
docs/_themes/flask/LICENSE (vendored, 37 lines deleted)

@@ -1,37 +0,0 @@
(The vendored Flask theme license, Copyright (c) 2010 by Armin Ronacher, is deleted in its entirety.)
docs/_themes/flask/layout.html (vendored, 27 lines deleted)

@@ -1,27 +0,0 @@
(The vendored Flask theme Jinja layout template, including the "Fork me on GitHub" ribbon, is deleted in its entirety.)
docs/_themes/flask/relations.html (vendored, 19 lines deleted)

@@ -1,19 +0,0 @@
(The vendored Flask theme "Related Topics" sidebar template is deleted in its entirety.)
docs/_themes/flask/static/flasky.css_t (vendored, 394 lines deleted)

@@ -1,394 +0,0 @@
(The vendored Flask theme stylesheet, Copyright 2010 by Armin Ronacher, is deleted in its entirety.)
docs/_themes/flask/static/small_flask.css (vendored, 70 lines deleted)

@@ -1,70 +0,0 @@
(The vendored Flask theme small-screen stylesheet is deleted in its entirety.)
docs/_themes/flask/theme.conf (vendored, 9 lines deleted)

@@ -1,9 +0,0 @@
[theme]
inherit = basic
stylesheet = flasky.css
pygments_style = flask_theme_support.FlaskyStyle

[options]
index_logo =
index_logo_height = 120px
touch_icon =
docs/_themes/flask_theme_support.py (vendored, 125 lines deleted)

@@ -1,125 +0,0 @@
(The vendored flasky Pygments style, a tango-based ``FlaskyStyle`` class, is deleted in its entirety along with its license header.)
docs/conf.py (31 changed lines)

@@ -13,13 +13,11 @@

import sys
import os
import datetime

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))

# -- General configuration -----------------------------------------------------

@@ -29,7 +27,8 @@ sys.path.append(os.path.abspath('_themes'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
              'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram']
              'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram',
              'sphinx_rtd_theme', 'sphinx.ext.autosummary']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

@@ -54,8 +53,8 @@ from jedi.utils import version_info
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(str(x) for x in version_info()[:2])
# The short X.Y.Z version.
version = '.'.join(str(x) for x in version_info()[:3])
# The full version, including alpha/beta/rc tags.
release = jedi.__version__

@@ -98,12 +97,15 @@ pygments_style = 'sphinx'

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
    'logo_only': True,
    'style_nav_header_background': 'white',
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

@@ -117,7 +119,7 @@ html_theme_path = ['_themes']

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = '_static/logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32

@@ -129,6 +131,8 @@ html_theme_path = ['_themes']
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_css_files = ['custom_style.css']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

@@ -145,7 +149,7 @@ html_sidebars = {
        #'relations.html',
        'ghbuttons.html',
        #'sourcelink.html',
        #'searchbox.html'
        'searchbox.html'
    ]
}

@@ -163,13 +167,13 @@ html_sidebars = {
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
html_show_copyright = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the

@@ -274,7 +278,8 @@ autodoc_default_flags = []
# -- Options for intersphinx module --------------------------------------------

intersphinx_mapping = {
    'https://docs.python.org/': None,
    'python': ('https://docs.python.org/', None),
    'parso': ('https://parso.readthedocs.io/en/latest/', None),
}
66
docs/docs/acknowledgements.rst
Normal file
66
docs/docs/acknowledgements.rst
Normal file
@@ -0,0 +1,66 @@
|
||||
.. include global.rst
|
||||
|
||||
History & Acknowledgements
|
||||
==========================
|
||||
|
||||
Acknowledgements
|
||||
----------------
|
||||
|
||||
- Dave Halter for creating and maintaining Jedi & Parso.
|
||||
- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
|
||||
other things.
|
||||
- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
|
||||
- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
|
||||
(originally used in lib2to3).
|
||||
- Thanks to all the :ref:`contributors <contributors>`.
|
||||
|
||||
A Little Bit of History
|
||||
-----------------------
|
||||
|
||||
Written by Dave.
|
||||
|
||||
The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit
|
||||
of the precognition the Jedi have. There's even an awesome `scene
|
||||
<https://youtu.be/yHRJLIf7wMU>`_ of Monty Python Jedis :-).
|
||||
|
||||
But actually the name has not much to do with Star Wars. It's part of my
|
||||
second name Jedidjah.
|
||||
|
||||
I actually started Jedi back in 2012, because there were no good solutions
|
||||
available for VIM. Most auto-completion solutions just did not work well. The
|
||||
only good solution was PyCharm. But I liked my good old VIM very much. There
|
||||
was also a solution called Rope that did not work at all for me. So I decided
|
||||
to write my own version of a completion engine.
|
||||
|
||||
The first idea was to execute non-dangerous code. But I soon realized that
|
||||
this would not work. So I started to build a static analysis tool.
|
||||
The biggest problem that I had at the time was that I did not know a thing
|
||||
about parsers. I did not even know the term static analysis. It turns
|
||||
out that parsers are the foundation of a good static analysis tool. I of course did not
|
||||
know that and tried to write my own poor version of a parser that I ended up
|
||||
throwing away two years later.
|
||||
|
||||
Because of my lack of knowledge, everything after 2012 and before 2020 was
|
||||
basically refactoring. I rewrote the core parts of Jedi probably like 5-10
|
||||
times. The last big rewrite (that I did twice) was the inclusion of
|
||||
gradual typing and stubs.
|
||||
|
||||
I learned during that time that it is crucial to have a good understanding of
|
||||
your problem. Otherwise you just end up doing it again. I only wrote features
|
||||
in the beginning and in the end. Everything else was bugfixing and refactoring.
|
||||
However, now I am really happy with the result. It works well, bugfixes can be
|
||||
quick and it is pretty much feature complete.
|
||||
|
||||
--------
|
||||
|
||||
I will leave you with a small anecdote that happened in 2012, if I remember
|
||||
correctly. After I explained to Guido van Rossum how some parts of my
|
||||
auto-completion work, he said:
|
||||
|
||||
*"Oh, that worries me..."*
|
||||
|
||||
Now that it is finished, I hope he likes it :-).
|
||||
|
||||
.. _contributors:
|
||||
|
||||
.. include:: ../../AUTHORS.txt
|
||||
@@ -5,6 +5,49 @@
|
||||
API Return Classes
|
||||
------------------
|
||||
|
||||
.. automodule:: jedi.api.classes
|
||||
Abstract Base Class
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
.. autoclass:: jedi.api.classes.BaseName
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
||||
Name
|
||||
~~~~
|
||||
.. autoclass:: jedi.api.classes.Name
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
Completion
|
||||
~~~~~~~~~~
|
||||
.. autoclass:: jedi.api.classes.Completion
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
BaseSignature
|
||||
~~~~~~~~~~~~~
|
||||
.. autoclass:: jedi.api.classes.BaseSignature
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
Signature
|
||||
~~~~~~~~~
|
||||
.. autoclass:: jedi.api.classes.Signature
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
ParamName
|
||||
~~~~~~~~~
|
||||
.. autoclass:: jedi.api.classes.ParamName
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
Refactoring
|
||||
~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: jedi.api.refactoring.Refactoring
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. autoclass:: jedi.api.errors.SyntaxError
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
@@ -3,56 +3,74 @@
|
||||
API Overview
|
||||
============
|
||||
|
||||
.. currentmodule:: jedi
|
||||
|
||||
Note: This documentation is for Plugin developers, who want to improve their
|
||||
editors/IDE autocompletion
|
||||
|
||||
If you want to use |jedi|, you first need to ``import jedi``. You then have
|
||||
direct access to the :class:`.Script`. You can then call the functions
|
||||
documented here. These functions return :ref:`API classes
|
||||
<api-classes>`.
|
||||
|
||||
|
||||
Deprecations
|
||||
------------
|
||||
|
||||
The deprecation process is as follows:
|
||||
|
||||
1. A deprecation is announced in the next major/minor release.
|
||||
2. We wait either at least a year & at least two minor releases until we remove
|
||||
the deprecated functionality.
|
||||
|
||||
|
||||
API Documentation
|
||||
-----------------
|
||||
|
||||
The API consists of a few different parts:
|
||||
|
||||
- The main starting points for complete/goto: :class:`.Script` and :class:`.Interpreter`
|
||||
- Helpful functions: :func:`.preload_module` and :func:`.set_debug_function`
|
||||
- :ref:`API Result Classes <api-classes>`
|
||||
- :ref:`Python Versions/Virtualenv Support <environments>` with functions like
|
||||
:func:`.find_system_environments` and :func:`.find_virtualenvs`
|
||||
.. note:: This documentation is mostly for Plugin developers, who want to
|
||||
improve their editors/IDE with Jedi.
|
||||
|
||||
.. _api:
|
||||
|
||||
Static Analysis Interface
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
The API consists of a few different parts:
|
||||
|
||||
.. automodule:: jedi
|
||||
- The main starting points for complete/goto: :class:`.Script` and
|
||||
:class:`.Interpreter`. If you work with Jedi you want to understand these
|
||||
classes first.
|
||||
- :ref:`API Result Classes <api-classes>`
|
||||
- :ref:`Python Versions/Virtualenv Support <environments>` with functions like
|
||||
:func:`.find_system_environments` and :func:`.find_virtualenvs`
|
||||
- A way to work with different :ref:`Folders / Projects <projects>`
|
||||
- Helpful functions: :func:`.preload_module` and :func:`.set_debug_function`
|
||||
|
||||
The methods that you are most likely going to use to work with Jedi are the
|
||||
following ones:
|
||||
|
||||
.. currentmodule:: jedi
|
||||
|
||||
.. autosummary::
|
||||
:nosignatures:
|
||||
|
||||
Script.complete
|
||||
Script.goto
|
||||
Script.infer
|
||||
Script.help
|
||||
Script.get_signatures
|
||||
Script.get_references
|
||||
Script.get_context
|
||||
Script.get_names
|
||||
Script.get_syntax_errors
|
||||
Script.rename
|
||||
Script.inline
|
||||
Script.extract_variable
|
||||
Script.extract_function
|
||||
Script.search
|
||||
Script.complete_search
|
||||
Project.search
|
||||
Project.complete_search
|
||||
|
||||
Script
|
||||
------
|
||||
|
||||
.. autoclass:: jedi.Script
|
||||
:members:
|
||||
|
||||
Interpreter
|
||||
-----------
|
||||
.. autoclass:: jedi.Interpreter
|
||||
:members:
|
||||
.. autofunction:: jedi.preload_module
|
||||
.. autofunction:: jedi.set_debug_function
|
||||
|
||||
.. _projects:
|
||||
|
||||
Projects
|
||||
--------
|
||||
|
||||
.. automodule:: jedi.api.project
|
||||
|
||||
.. autofunction:: jedi.get_default_project
|
||||
.. autoclass:: jedi.Project
|
||||
:members:
|
||||
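A short sketch of project-wide code search with this class; the project path and the search string below are made up, and the exact results depend on the code base::

    import jedi

    project = jedi.Project('/path/to/your/project')
    # Search the whole project for a class definition by name.
    for name in project.search('class Script'):
        print(name.module_path, name.line, name.description)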
|
||||
.. _environments:
|
||||
|
||||
Environments
|
||||
~~~~~~~~~~~~
|
||||
------------
|
||||
|
||||
.. automodule:: jedi.api.environment
|
||||
|
||||
@@ -65,18 +83,31 @@ Environments
|
||||
.. autoclass:: jedi.api.environment.Environment
|
||||
:members:
|
||||
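A sketch of picking an environment explicitly; the virtualenv path below is hypothetical and the output depends on what is installed locally::

    import jedi

    # Enumerate the Python installations Jedi can find on this machine.
    for env in jedi.find_system_environments():
        print(env.executable, env.version_info)

    # Pin a Script to a specific virtualenv (the path is made up).
    venv = jedi.create_environment('/path/to/venv')
    script = jedi.Script('import json; json.l', path='example.py',
                         environment=venv)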
|
||||
Helper Functions
|
||||
----------------
|
||||
|
||||
.. autofunction:: jedi.preload_module
|
||||
.. autofunction:: jedi.set_debug_function
|
||||
|
||||
Errors
|
||||
------
|
||||
|
||||
.. autoexception:: jedi.InternalError
|
||||
.. autoexception:: jedi.RefactoringError
|
||||
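A sketch of the error handling a plugin might do around a refactoring call; the code, position and new name are made up::

    import jedi

    script = jedi.Script("def foo():\n    return 1\n", path="example.py")
    try:
        # Rename the function defined at line 1, column 4.
        refactoring = script.rename(1, 4, new_name="bar")
    except jedi.RefactoringError as error:
        print("Refactoring not possible here:", error)
    else:
        print(refactoring.get_diff())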
|
||||
Examples
|
||||
--------
|
||||
|
||||
Completions:
|
||||
Completions
|
||||
~~~~~~~~~~~
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
>>> import jedi
|
||||
>>> source = '''import json; json.l'''
|
||||
>>> script = jedi.Script(source, path='')
|
||||
>>> code = '''import json; json.l'''
|
||||
>>> script = jedi.Script(code, path='example.py')
|
||||
>>> script
|
||||
<jedi.api.Script object at 0x2121b10>
|
||||
<Script: 'example.py' <SameEnvironment: 3.5.2 in /usr>>
|
||||
>>> completions = script.complete(1, 19)
|
||||
>>> completions
|
||||
[<Completion: load>, <Completion: loads>]
|
||||
@@ -87,12 +118,14 @@ Completions:
|
||||
>>> completions[1].name
|
||||
'loads'
|
||||
|
||||
Definitions / Goto:
|
||||
Type Inference / Goto
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
>>> import jedi
|
||||
>>> source = '''def my_func():
|
||||
>>> code = '''\
|
||||
... def my_func():
|
||||
... print 'called'
|
||||
...
|
||||
... alias = my_func
|
||||
@@ -100,30 +133,42 @@ Definitions / Goto:
|
||||
... inception = my_list[2]
|
||||
...
|
||||
... inception()'''
|
||||
>>> script = jedi.Script(source, path='')
|
||||
>>> script = jedi.Script(code)
|
||||
>>>
|
||||
>>> script.goto(8, 1)
|
||||
[<Definition inception=my_list[2]>]
|
||||
[<Name full_name='__main__.inception', description='inception = my_list[2]'>]
|
||||
>>>
|
||||
>>> script.infer(8, 1)
|
||||
[<Definition def my_func>]
|
||||
[<Name full_name='__main__.my_func', description='def my_func'>]
|
||||
|
||||
References:
|
||||
References
|
||||
~~~~~~~~~~
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
>>> import jedi
|
||||
>>> source = '''x = 3
|
||||
>>> code = '''\
|
||||
... x = 3
|
||||
... if 1 == 2:
|
||||
... x = 4
|
||||
... else:
|
||||
... del x'''
|
||||
>>> script = jedi.Script(source, '')
|
||||
>>> script = jedi.Script(code)
|
||||
>>> rns = script.get_references(5, 8)
|
||||
>>> rns
|
||||
[<Definition full_name='__main__.x', description='x = 3'>,
|
||||
<Definition full_name='__main__.x', description='x'>]
|
||||
[<Name full_name='__main__.x', description='x = 3'>,
|
||||
<Name full_name='__main__.x', description='x = 4'>,
|
||||
<Name full_name='__main__.x', description='del x'>]
|
||||
>>> rns[1].line
|
||||
5
|
||||
>>> rns[0].column
|
||||
8
|
||||
3
|
||||
>>> rns[1].column
|
||||
4
|
||||
|
||||
Deprecations
|
||||
------------
|
||||
|
||||
The deprecation process is as follows:
|
||||
|
||||
1. A deprecation is announced in the next major/minor release.
|
||||
2. We wait at least a year and at least two minor releases before we
|
||||
remove the deprecated functionality.
|
||||
|
||||
1
docs/docs/changelog.rst
Normal file
@@ -0,0 +1 @@
|
||||
.. include:: ../../CHANGELOG.rst
|
||||
@@ -22,16 +22,12 @@ couldn't get rid of complexity. I know that **simple is better than complex**,
|
||||
but unfortunately it sometimes requires complex solutions to understand complex
|
||||
systems.
|
||||
|
||||
Since most of the Jedi internals have been written by me (David Halter), this
|
||||
introduction will be written mostly by me, because no one else understands
|
||||
how Jedi works to the same level. Actually this is also the reason for this
|
||||
part of the documentation: to enable multiple people to edit the Jedi core.
|
||||
|
||||
In five chapters I'm trying to describe the internals of |jedi|:
|
||||
In six chapters I'm trying to describe the internals of |jedi|:
|
||||
|
||||
- :ref:`The Jedi Core <core>`
|
||||
- :ref:`Core Extensions <core-extensions>`
|
||||
- :ref:`Imports & Modules <imports-modules>`
|
||||
- :ref:`Stubs & Annotations <stubs>`
|
||||
- :ref:`Caching & Recursions <caching-recursions>`
|
||||
- :ref:`Helper modules <dev-helpers>`
|
||||
|
||||
@@ -59,17 +55,17 @@ because that's where all the magic happens. I need to introduce the :ref:`parser
|
||||
Parser
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Jedi used to have it's internal parser, however this is now a separate project
|
||||
Jedi used to have its internal parser, however this is now a separate project
|
||||
and is called `parso <http://parso.readthedocs.io>`_.
|
||||
|
||||
The parser creates a syntax tree that |jedi| analyses and tries to understand.
|
||||
The grammar that this parsers uses is very similar to the official Python
|
||||
The grammar that this parser uses is very similar to the official Python
|
||||
`grammar files <https://docs.python.org/3/reference/grammar.html>`_.
|
||||
|
||||
.. _inference:
|
||||
|
||||
Type inference of python code (inference/__init__.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference
|
||||
|
||||
@@ -80,7 +76,7 @@ Inference Values (inference/base_value.py)
|
||||
|
||||
.. inheritance-diagram::
|
||||
jedi.inference.value.instance.TreeInstance
|
||||
jedi.inference.value.klass.Classvalue
|
||||
jedi.inference.value.klass.ClassValue
|
||||
jedi.inference.value.function.FunctionValue
|
||||
jedi.inference.value.function.FunctionExecutionContext
|
||||
:parts: 1
|
||||
@@ -89,7 +85,7 @@ Inference Values (inference/base_value.py)
|
||||
.. _name_resolution:
|
||||
|
||||
Name resolution (inference/finder.py)
|
||||
++++++++++++++++++++++++++++++++++++
|
||||
+++++++++++++++++++++++++++++++++++++
|
||||
|
||||
.. automodule:: jedi.inference.finder
|
||||
|
||||
@@ -114,7 +110,7 @@ Core Extensions
|
||||
Core Extensions is a summary of the following topics:
|
||||
|
||||
- :ref:`Iterables & Dynamic Arrays <iterables>`
|
||||
- :ref:`Dynamic Parameters <dynamic>`
|
||||
- :ref:`Dynamic Parameters <dynamic_params>`
|
||||
- :ref:`Docstrings <docstrings>`
|
||||
- :ref:`Refactoring <refactoring>`
|
||||
|
||||
@@ -125,7 +121,7 @@ without some features.
|
||||
.. _iterables:
|
||||
|
||||
Iterables & Dynamic Arrays (inference/value/iterable.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To understand Python on a deeper level, |jedi| needs to understand some of the
|
||||
dynamic features of Python like lists that are filled after creation:
|
||||
@@ -133,33 +129,33 @@ dynamic features of Python like lists that are filled after creation:
|
||||
.. automodule:: jedi.inference.value.iterable
|
||||
|
||||
|
||||
.. _dynamic:
|
||||
.. _dynamic_params:
|
||||
|
||||
Parameter completion (inference/dynamic.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Parameter completion (inference/dynamic_params.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference.dynamic
|
||||
.. automodule:: jedi.inference.dynamic_params
|
||||
|
||||
|
||||
.. _docstrings:
|
||||
|
||||
Docstrings (inference/docstrings.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference.docstrings
|
||||
|
||||
.. _refactoring:
|
||||
|
||||
Refactoring (inference/refactoring.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Refactoring (inference/api/refactoring.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.refactoring
|
||||
.. automodule:: jedi.api.refactoring
|
||||
|
||||
|
||||
.. _imports-modules:
|
||||
|
||||
Imports & Modules
|
||||
-------------------
|
||||
-----------------
|
||||
|
||||
|
||||
- :ref:`Modules <modules>`
|
||||
@@ -170,7 +166,7 @@ Imports & Modules
|
||||
.. _builtin:
|
||||
|
||||
Compiled Modules (inference/compiled.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference.compiled
|
||||
|
||||
@@ -178,10 +174,16 @@ Compiled Modules (inference/compiled.py)
|
||||
.. _imports:
|
||||
|
||||
Imports (inference/imports.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference.imports
|
||||
|
||||
.. _stubs:
|
||||
|
||||
Stubs & Annotations (inference/gradual)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi.inference.gradual
|
||||
|
||||
.. _caching-recursions:
|
||||
|
||||
@@ -210,13 +212,8 @@ Recursions (recursion.py)
|
||||
.. _dev-helpers:
|
||||
|
||||
Helper Modules
|
||||
---------------
|
||||
--------------
|
||||
|
||||
Most other modules are not really central to how Jedi works. They all contain
|
||||
relevant code, but if you understand the modules above, you pretty much
|
||||
understand Jedi.
|
||||
|
||||
Python 2/3 compatibility (_compatibility.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: jedi._compatibility
|
||||
|
||||
@@ -1,29 +1,30 @@
|
||||
.. include:: ../global.rst
|
||||
|
||||
Features and Caveats
|
||||
====================
|
||||
Features and Limitations
|
||||
========================
|
||||
|
||||
Jedi obviously supports autocompletion. It's also possible to get it working in
|
||||
(:ref:`your REPL (IPython, etc.) <repl-completion>`).
|
||||
Jedi's main API calls and features are (a short usage sketch follows this list):
|
||||
|
||||
Static analysis is also possible by using ``jedi.Script(...).get_names``.
|
||||
- Autocompletion: :meth:`.Script.complete`; It's also possible to get it
|
||||
working in :ref:`your REPL (IPython, etc.) <repl-completion>`
|
||||
- Goto/Type Inference: :meth:`.Script.goto` and :meth:`.Script.infer`
|
||||
- Static Analysis: :meth:`.Script.get_names` and :meth:`.Script.get_syntax_errors`
|
||||
- Refactorings: :meth:`.Script.rename`, :meth:`.Script.inline`,
|
||||
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`
|
||||
- Code Search: :meth:`.Script.search` and :meth:`.Project.search`
|
||||
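A minimal sketch of how a few of these calls fit together; the code, the position and the file path below are made up, and the exact output depends on the Jedi version and environment::

    import jedi

    code = "import json\njson.lo"
    script = jedi.Script(code, path="example.py")  # the path is made up

    # Autocompletion at line 2, column 7 (right after "json.lo").
    print([c.name for c in script.complete(2, 7)])

    # Static analysis: list the names defined in the module.
    print([n.name for n in script.get_names()])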
|
||||
Jedi would in theory support refactoring, but we have never publicized it,
|
||||
because it's not production ready. If you're interested in helping out here,
|
||||
let me know. With the latest parser changes, it should be very easy to actually
|
||||
make it work.
|
||||
Basic Features
|
||||
--------------
|
||||
|
||||
|
||||
General Features
|
||||
----------------
|
||||
|
||||
- Python 2.7 and 3.4+ support
|
||||
- Python 2.7 and 3.5+ support
|
||||
- Ignores syntax errors and wrong indentation
|
||||
- Can deal with complex module / function / class structures
|
||||
- Great Virtualenv support
|
||||
- Can infer function arguments from sphinx, epydoc and basic numpydoc docstrings,
|
||||
and PEP0484-style type hints (:ref:`type hinting <type-hinting>`)
|
||||
- Stub files
|
||||
- Great ``virtualenv``/``venv`` support
|
||||
- Works great with Python's :ref:`type hinting <type-hinting>`
|
||||
- Understands stub files
|
||||
- Can infer function arguments for sphinx, epydoc and basic numpydoc docstrings
|
||||
- Is overall a very solid piece of software that has been refined for a long
|
||||
time. Bug reports are very welcome and are usually fixed within a few weeks.
|
||||
|
||||
|
||||
Supported Python Features
|
||||
@@ -38,7 +39,7 @@ Supported Python Features
|
||||
- ``*args`` / ``**kwargs``
|
||||
- decorators / lambdas / closures
|
||||
- generators / iterators
|
||||
- some descriptors: property / staticmethod / classmethod
|
||||
- descriptors: property / staticmethod / classmethod / custom descriptors
|
||||
- some magic methods: ``__call__``, ``__iter__``, ``__next__``, ``__get__``,
|
||||
``__getitem__``, ``__init__``
|
||||
- ``list.append()``, ``set.add()``, ``list.extend()``, etc.
|
||||
@@ -46,191 +47,64 @@ Supported Python Features
|
||||
- relative imports
|
||||
- ``getattr()`` / ``__getattr__`` / ``__getattribute__``
|
||||
- function annotations
|
||||
- class decorators (py3k feature, are being ignored too, until I find a use
|
||||
case, that doesn't work with |jedi|)
|
||||
- simple/usual ``sys.path`` modifications
|
||||
- simple/typical ``sys.path`` modifications
|
||||
- ``isinstance`` checks for if/while/assert
|
||||
- namespace packages (includes ``pkgutil``, ``pkg_resources`` and PEP420 namespaces)
|
||||
- Django / Flask / Buildout support
|
||||
- Understands Pytest fixtures
|
||||
|
||||
|
||||
Not Supported
|
||||
-------------
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
Not yet implemented:
|
||||
In general Jedi's limits are quite high, but for very big projects or very
|
||||
complex code, sometimes Jedi intentionally stops type inference, to avoid
|
||||
hanging for a long time.
|
||||
|
||||
- manipulations of instances outside the instance variables without using
|
||||
methods
|
||||
Additionally there are some Python patterns Jedi does not support. This is
|
||||
intentional and below should be a complete list:
|
||||
|
||||
Will probably never be implemented:
|
||||
|
||||
- metaclasses (how could an auto-completion ever support this)
|
||||
- Arbitrary metaclasses: Some metaclasses like enums and dataclasses are
|
||||
reimplemented in Jedi to make them work. Most of the time stubs are good
|
||||
enough to get type inference working, even when metaclasses are involved.
|
||||
- ``setattr()``, ``__import__()``
|
||||
- writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__``
|
||||
- Writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__``
|
||||
- Manipulations of instances outside the instance variables without using
|
||||
methods
|
||||
|
||||
|
||||
Caveats
|
||||
-------
|
||||
|
||||
**Slow Performance**
|
||||
Performance Issues
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Importing ``numpy`` can be quite slow sometimes, as well as loading the
|
||||
builtins the first time. If you want to speed things up, you could write import
|
||||
hooks in |jedi|, which preload stuff. However, once loaded, this is not a
|
||||
problem anymore. The same is true for huge modules like ``PySide``, ``wx``,
|
||||
etc.
|
||||
builtins the first time. If you want to speed things up, you could preload
|
||||
libraries in |jedi| with :func:`.preload_module`. However, once loaded, this
|
||||
should not be a problem anymore. The same is true for huge modules like
|
||||
``PySide``, ``wx``, ``tensorflow``, ``pandas``, etc.
|
||||
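For example, a plugin could warm the cache right after importing Jedi; this is only a sketch and the module names are just examples::

    import jedi

    # Parse and cache these modules once at startup so that the first
    # completion request does not pay the full import-analysis cost.
    jedi.preload_module('numpy', 'pandas')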
|
||||
**Security**
|
||||
Jedi does not have a very good cache layer. This is probably the biggest and
|
||||
only architectural `issue <https://github.com/davidhalter/jedi/issues/1059>`_ in
|
||||
Jedi. Unfortunately it is not easy to change that. Dave Halter is thinking
|
||||
about rewriting Jedi in Rust, but it has taken Jedi more than 8 years to reach
|
||||
version 1.0; a rewrite will probably also take years.
|
||||
|
||||
Security is an important issue for |jedi|. Therefore no Python code is
|
||||
executed. As long as you write pure Python, everything is inferred
|
||||
statically. But: If you use builtin modules (``c_builtin``) there is no other
|
||||
option than to execute those modules. However: Execute isn't that critical (as
|
||||
e.g. in pythoncomplete, which used to execute *every* import!), because it
|
||||
means one import and no more. So basically the only dangerous thing is using
|
||||
the import itself. If your ``c_builtin`` uses some strange initializations, it
|
||||
might be dangerous. But if it does you're screwed anyways, because eventually
|
||||
you're going to execute your code, which executes the import.
|
||||
Security
|
||||
--------
|
||||
|
||||
For :class:`.Script`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Recipes
|
||||
-------
|
||||
Security is an important topic for |jedi|. By default, no code is executed
|
||||
within Jedi. As long as you write pure Python, everything is inferred
|
||||
statically. If you enable ``load_unsafe_extensions=True`` for your
|
||||
:class:`.Project` and you use builtin modules (``c_builtin``) Jedi will execute
|
||||
those modules. If you don't trust a code base, please do not enable that
|
||||
option. It might lead to arbitrary code execution.
|
||||
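A sketch of keeping the default, safe behaviour explicit; the project path is hypothetical::

    import jedi

    # load_unsafe_extensions defaults to False; only set it to True for
    # code bases you fully trust, since it may execute their extensions.
    project = jedi.Project('/path/to/trusted/project',
                           load_unsafe_extensions=False)
    script = jedi.Script('import json; json.l', path='example.py',
                         project=project)
    print(script.complete(1, 19))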
|
||||
Here are some tips on how to use |jedi| efficiently.
|
||||
For :class:`.Interpreter`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
.. _type-hinting:
|
||||
|
||||
Type Hinting
|
||||
~~~~~~~~~~~~
|
||||
|
||||
If |jedi| cannot detect the type of a function argument correctly (due to the
|
||||
dynamic nature of Python), you can help it by hinting the type using
|
||||
one of the following docstring/annotation syntax styles:
|
||||
|
||||
**PEP-0484 style**
|
||||
|
||||
https://www.python.org/dev/peps/pep-0484/
|
||||
|
||||
function annotations
|
||||
|
||||
::
|
||||
|
||||
def myfunction(node: ProgramNode, foo: str) -> None:
|
||||
"""Do something with a ``node``.
|
||||
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
|
||||
assignment, for-loop and with-statement type hints (all Python versions).
|
||||
Note that the type hints must be on the same line as the statement
|
||||
|
||||
::
|
||||
|
||||
x = foo() # type: int
|
||||
x, y = 2, 3 # type: typing.Optional[int], typing.Union[int, str] # typing module is mostly supported
|
||||
for key, value in foo.items(): # type: str, Employee # note that Employee must be in scope
|
||||
pass
|
||||
with foo() as f: # type: int
|
||||
print(f + 3)
|
||||
|
||||
Most of the features in PEP-0484 are supported including the typing module
|
||||
(for Python < 3.5 you have to do ``pip install typing`` to use these),
|
||||
and forward references.
|
||||
|
||||
You can also use stub files.
|
||||
|
||||
|
||||
**Sphinx style**
|
||||
|
||||
http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists
|
||||
|
||||
::
|
||||
|
||||
def myfunction(node, foo):
|
||||
"""Do something with a ``node``.
|
||||
|
||||
:type node: ProgramNode
|
||||
:param str foo: foo parameter description
|
||||
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
**Epydoc**
|
||||
|
||||
http://epydoc.sourceforge.net/manual-fields.html
|
||||
|
||||
::
|
||||
|
||||
def myfunction(node):
|
||||
"""Do something with a ``node``.
|
||||
|
||||
@type node: ProgramNode
|
||||
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
**Numpydoc**
|
||||
|
||||
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
|
||||
|
||||
In order to support the numpydoc format, you need to install the `numpydoc
|
||||
<https://pypi.python.org/pypi/numpydoc>`__ package.
|
||||
|
||||
::
|
||||
|
||||
def foo(var1, var2, long_var_name='hi'):
|
||||
r"""A one-line summary that does not use variable names or the
|
||||
function name.
|
||||
|
||||
...
|
||||
|
||||
Parameters
|
||||
----------
|
||||
var1 : array_like
|
||||
Array_like means all those objects -- lists, nested lists,
|
||||
etc. -- that can be converted to an array. We can also
|
||||
refer to variables like `var1`.
|
||||
var2 : int
|
||||
The type above can either refer to an actual Python type
|
||||
(e.g. ``int``), or describe the type of the variable in more
|
||||
detail, e.g. ``(N,) ndarray`` or ``array_like``.
|
||||
long_variable_name : {'hi', 'ho'}, optional
|
||||
Choices in brackets, default first when optional.
|
||||
|
||||
...
|
||||
|
||||
"""
|
||||
var2.| # complete here
|
||||
|
||||
A little history
|
||||
----------------
|
||||
|
||||
The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit
|
||||
of the precognition the Jedi have. There's even an awesome `scene
|
||||
<https://youtu.be/yHRJLIf7wMU>`_ of Monty Python Jedis :-).
|
||||
|
||||
But actually the name hasn't so much to do with Star Wars. It's part of my
|
||||
second name.
|
||||
|
||||
After I explained Guido van Rossum, how some parts of my auto-completion work,
|
||||
he said (we drank a beer or two):
|
||||
|
||||
*"Oh, that worries me..."*
|
||||
|
||||
When it's finished, I hope he'll like it :-)
|
||||
|
||||
I actually started Jedi, because there were no good solutions available for VIM.
|
||||
Most auto-completions just didn't work well. The only good solution was PyCharm.
|
||||
But I like my good old VIM. Rope was never really intended to be an
|
||||
auto-completion (and also I really hate project folders for my Python scripts).
|
||||
It's more of a refactoring suite. So I decided to do my own version of a
|
||||
completion, which would execute non-dangerous code. But I soon realized, that
|
||||
this wouldn't work. So I built an extremely recursive thing which understands
|
||||
many of Python's key features.
|
||||
|
||||
By the way, I really tried to program it as understandable as possible. But I
|
||||
think understanding it might need quite some time, because of its recursive
|
||||
nature.
|
||||
If you want security for :class:`.Interpreter`, ``do not`` use it. Jedi does
|
||||
execute properties and in general is not very careful to avoid code execution.
|
||||
This is intentional: Most people trust the code bases they have imported,
|
||||
because at that point a malicious code base would have had code execution
|
||||
already.
|
||||
|
||||
@@ -3,6 +3,15 @@
|
||||
Installation and Configuration
|
||||
==============================
|
||||
|
||||
.. warning:: Most people will want to install Jedi as a submodule/vendored and
|
||||
not through pip/system wide. The reason for this is that it makes sense that
|
||||
the plugin that uses Jedi always has access to it. Otherwise Jedi will not
|
||||
work properly when virtualenvs are activated. So please read the
|
||||
documentation of your editor/IDE plugin to install Jedi.
|
||||
|
||||
For plugin developers, Jedi works best if it is always available. Vendoring
|
||||
is a pretty good option for that.
|
||||
|
||||
You can either include |jedi| as a submodule in your text editor plugin (like
|
||||
jedi-vim_ does by default), or you can install it systemwide.
|
||||
|
||||
|
||||
@@ -3,18 +3,14 @@
|
||||
Jedi Testing
|
||||
============
|
||||
|
||||
The test suite depends on ``tox`` and ``pytest``::
|
||||
The test suite depends on ``pytest``::
|
||||
|
||||
pip install tox pytest
|
||||
pip install pytest
|
||||
|
||||
To run the tests for all supported Python versions::
|
||||
|
||||
tox
|
||||
|
||||
If you want to test only a specific Python version (e.g. Python 2.7), it's as
|
||||
If you want to test only a specific Python version (e.g. Python 3.8), it is as
|
||||
easy as::
|
||||
|
||||
tox -e py27
|
||||
python3.8 -m pytest
|
||||
|
||||
Tests are also run automatically on `Travis CI
|
||||
<https://travis-ci.org/davidhalter/jedi/>`_.
|
||||
@@ -28,8 +24,8 @@ simple and readable testing structure.
|
||||
|
||||
.. _blackbox:
|
||||
|
||||
Blackbox Tests (run.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Integration Tests (run.py)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. automodule:: test.run
|
||||
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
.. include:: ../global.rst
|
||||
|
||||
End User Usage
|
||||
==============
|
||||
Using Jedi
|
||||
==========
|
||||
|
||||
If you are a not an IDE Developer, the odds are that you just want to use
|
||||
|jedi| as a browser plugin or in the shell. Yes that's :ref:`also possible
|
||||
<repl-completion>`!
|
||||
|jedi| can be used with a variety of plugins and software. It is also possible
|
||||
to use |jedi| in the :ref:`Python shell or with IPython <repl-completion>`.
|
||||
|
||||
|jedi| is relatively young and can be used in a variety of Plugins and
|
||||
Software. If your Editor/IDE is not among them, recommend |jedi| to your IDE
|
||||
developers.
|
||||
Below you can also find a list of :ref:`recipes for type hinting <recipes>`.
|
||||
|
||||
|
||||
.. _editor-plugins:
|
||||
@@ -17,60 +14,72 @@ developers.
|
||||
Editor Plugins
|
||||
--------------
|
||||
|
||||
Vim:
|
||||
Vim
|
||||
~~~
|
||||
|
||||
- jedi-vim_
|
||||
- YouCompleteMe_
|
||||
- deoplete-jedi_
|
||||
|
||||
Emacs:
|
||||
Visual Studio Code
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- `Python Extension`_
|
||||
|
||||
Emacs
|
||||
~~~~~
|
||||
|
||||
- Jedi.el_
|
||||
- elpy_
|
||||
- anaconda-mode_
|
||||
|
||||
Sublime Text 2/3:
|
||||
Sublime Text 2/3
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
- SublimeJEDI_ (ST2 & ST3)
|
||||
- anaconda_ (only ST3)
|
||||
|
||||
SynWrite:
|
||||
SynWrite
|
||||
~~~~~~~~
|
||||
|
||||
- SynJedi_
|
||||
|
||||
TextMate:
|
||||
TextMate
|
||||
~~~~~~~~
|
||||
|
||||
- Textmate_ (Not sure if it's actually working)
|
||||
|
||||
Kate:
|
||||
Kate
|
||||
~~~~
|
||||
|
||||
- Kate_ version 4.13+ `supports it natively
|
||||
<https://projects.kde.org/projects/kde/applications/kate/repository/entry/addons/kate/pate/src/plugins/python_autocomplete_jedi.py?rev=KDE%2F4.13>`__,
|
||||
you have to enable it, though.
|
||||
|
||||
Visual Studio Code:
|
||||
|
||||
- `Python Extension`_
|
||||
|
||||
Atom:
|
||||
Atom
|
||||
~~~~
|
||||
|
||||
- autocomplete-python-jedi_
|
||||
|
||||
GNOME Builder:
|
||||
GNOME Builder
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
- `GNOME Builder`_ `supports it natively
|
||||
<https://git.gnome.org/browse/gnome-builder/tree/plugins/jedi>`__,
|
||||
and is enabled by default.
|
||||
|
||||
Gedit:
|
||||
Gedit
|
||||
~~~~~
|
||||
|
||||
- gedi_
|
||||
|
||||
Eric IDE:
|
||||
Eric IDE
|
||||
~~~~~~~~
|
||||
|
||||
- `Eric IDE`_ (Available as a plugin)
|
||||
|
||||
Web Debugger:
|
||||
Web Debugger
|
||||
~~~~~~~~~~~~
|
||||
|
||||
- wdb_
|
||||
|
||||
@@ -81,11 +90,14 @@ and many more!
|
||||
Tab Completion in the Python Shell
|
||||
----------------------------------
|
||||
|
||||
Starting with Ipython `6.0.0` Jedi is a dependency of IPython. Autocompletion
|
||||
in IPython is therefore possible without additional configuration.
|
||||
Jedi is a dependency of IPython. Autocompletion in IPython is therefore
|
||||
possible without additional configuration.
|
||||
|
||||
Here is an `example video <https://vimeo.com/122332037>`_ of how REPL completion
|
||||
can look in a different shell.
|
||||
|
||||
There are two different options how you can use Jedi autocompletion in
|
||||
your Python interpreter. One with your custom ``$HOME/.pythonrc.py`` file
|
||||
your ``python`` interpreter. One with your custom ``$HOME/.pythonrc.py`` file
|
||||
and one that uses ``PYTHONSTARTUP``.
|
||||
|
||||
Using ``PYTHONSTARTUP``
|
||||
@@ -93,11 +105,137 @@ Using ``PYTHONSTARTUP``
|
||||
|
||||
.. automodule:: jedi.api.replstartup
|
||||
|
||||
Using a custom ``$HOME/.pythonrc.py``
|
||||
Using a Custom ``$HOME/.pythonrc.py``
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: jedi.utils.setup_readline
|
||||
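A minimal ``$HOME/.pythonrc.py`` sketch along these lines, assuming Jedi is importable in that interpreter::

    try:
        from jedi.utils import setup_readline
    except ImportError:
        # Jedi is not installed in this interpreter; fall back to the
        # standard library completer (importing rlcompleter registers it).
        import readline
        import rlcompleter
        readline.parse_and_bind("tab: complete")
    else:
        setup_readline()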
|
||||
.. _recipes:
|
||||
|
||||
Recipes
|
||||
-------
|
||||
|
||||
Here are some tips on how to use |jedi| efficiently.
|
||||
|
||||
|
||||
.. _type-hinting:
|
||||
|
||||
Type Hinting
|
||||
~~~~~~~~~~~~
|
||||
|
||||
If |jedi| cannot detect the type of a function argument correctly (due to the
|
||||
dynamic nature of Python), you can help it by hinting the type using
|
||||
one of the docstring/annotation styles below. **Only gradual typing will
|
||||
always work**; all the docstring solutions are glorified hacks, and more
|
||||
complicated cases will probably not work.
|
||||
|
||||
Official Gradual Typing (Recommended)
|
||||
+++++++++++++++++++++++++++++++++++++
|
||||
|
||||
You can read a lot about Python's gradual typing system in the corresponding
|
||||
PEPs like:
|
||||
|
||||
- `PEP 484 <https://www.python.org/dev/peps/pep-0484/>`_ as an introduction
|
||||
- `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_ for variable annotations
|
||||
- `PEP 589 <https://www.python.org/dev/peps/pep-0589/>`_ for ``TypedDict``
|
||||
- There are probably more :)
|
||||
|
||||
Below you can find a few examples of how you can use this feature.
|
||||
|
||||
Function annotations::
|
||||
|
||||
def myfunction(node: ProgramNode, foo: str) -> None:
|
||||
"""Do something with a ``node``.
|
||||
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
|
||||
Assignment, for-loop and with-statement type hints::
|
||||
|
||||
import typing
|
||||
x: int = foo()
|
||||
y: typing.Optional[int] = 3
|
||||
|
||||
key: str
|
||||
value: Employee
|
||||
for key, value in foo.items():
|
||||
pass
|
||||
|
||||
f: Union[int, float]
|
||||
with foo() as f:
|
||||
print(f + 3)
|
||||
|
||||
PEP-0484 should be supported in its entirety. Feel free to open issues if that
|
||||
is not the case. You can also use stub files.
|
||||
|
||||
|
||||
Sphinx style
|
||||
++++++++++++
|
||||
|
||||
http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists
|
||||
|
||||
::
|
||||
|
||||
def myfunction(node, foo):
|
||||
"""
|
||||
Do something with a ``node``.
|
||||
|
||||
:type node: ProgramNode
|
||||
:param str foo: foo parameter description
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
Epydoc
|
||||
++++++
|
||||
|
||||
http://epydoc.sourceforge.net/manual-fields.html
|
||||
|
||||
::
|
||||
|
||||
def myfunction(node):
|
||||
"""
|
||||
Do something with a ``node``.
|
||||
|
||||
@type node: ProgramNode
|
||||
"""
|
||||
node.| # complete here
|
||||
|
||||
Numpydoc
|
||||
++++++++
|
||||
|
||||
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
|
||||
|
||||
In order to support the numpydoc format, you need to install the `numpydoc
|
||||
<https://pypi.python.org/pypi/numpydoc>`__ package.
|
||||
|
||||
::
|
||||
|
||||
def foo(var1, var2, long_var_name='hi'):
|
||||
r"""
|
||||
A one-line summary that does not use variable names or the
|
||||
function name.
|
||||
|
||||
...
|
||||
|
||||
Parameters
|
||||
----------
|
||||
var1 : array_like
|
||||
Array_like means all those objects -- lists, nested lists,
|
||||
etc. -- that can be converted to an array. We can also
|
||||
refer to variables like `var1`.
|
||||
var2 : int
|
||||
The type above can either refer to an actual Python type
|
||||
(e.g. ``int``), or describe the type of the variable in more
|
||||
detail, e.g. ``(N,) ndarray`` or ``array_like``.
|
||||
long_variable_name : {'hi', 'ho'}, optional
|
||||
Choices in brackets, default first when optional.
|
||||
|
||||
...
|
||||
|
||||
"""
|
||||
var2.| # complete here
|
||||
|
||||
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
|
||||
.. _youcompleteme: https://valloric.github.io/YouCompleteMe/
|
||||
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
|
||||
@@ -114,4 +252,4 @@ Using a custom ``$HOME/.pythonrc.py``
|
||||
.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder/
|
||||
.. _gedi: https://github.com/isamert/gedi
|
||||
.. _Eric IDE: https://eric-ide.python-projects.org
|
||||
.. _Python Extension: https://marketplace.visualstudio.com/items?itemName=donjayamanne.python
|
||||
.. _Python Extension: https://marketplace.visualstudio.com/items?itemName=ms-python.python
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
:orphan:
|
||||
|
||||
.. |jedi| replace:: *Jedi*
|
||||
.. |jedi| replace:: Jedi
|
||||
|
||||
@@ -1,13 +1,40 @@
|
||||
.. include global.rst
|
||||
|
||||
Jedi - an awesome autocompletion/static analysis library for Python
|
||||
===================================================================
|
||||
.. meta::
|
||||
:github_url: https://github.com/davidhalter/jedi
|
||||
|
||||
Release v\ |release|. (:doc:`Installation <docs/installation>`)
|
||||
Jedi - an awesome autocompletion, static analysis and refactoring library for Python
|
||||
====================================================================================
|
||||
|
||||
.. image:: https://img.shields.io/github/stars/davidhalter/jedi.svg?style=social&label=Star&maxAge=2592000
|
||||
:target: https://github.com/davidhalter/jedi
|
||||
:alt: GitHub stars
|
||||
|
||||
.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg
|
||||
:target: https://github.com/davidhalter/jedi/issues
|
||||
:alt: The percentage of open issues and pull requests
|
||||
|
||||
.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg
|
||||
:target: https://github.com/davidhalter/jedi/issues
|
||||
:alt: The resolution time is the median time an issue or pull request stays open.
|
||||
|
||||
.. image:: https://travis-ci.org/davidhalter/jedi.svg?branch=master
|
||||
:target: https://travis-ci.org/davidhalter/jedi
|
||||
:alt: Linux Tests
|
||||
|
||||
.. image:: https://ci.appveyor.com/api/projects/status/mgva3bbawyma1new/branch/master?svg=true
|
||||
:target: https://ci.appveyor.com/project/davidhalter/jedi/branch/master
|
||||
:alt: Windows Tests
|
||||
|
||||
.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.svg?branch=master
|
||||
:target: https://coveralls.io/r/davidhalter/jedi
|
||||
:alt: Coverage status
|
||||
|
||||
`Github Repository <https://github.com/davidhalter/jedi>`_
|
||||
|
||||
.. automodule:: jedi
|
||||
|
||||
Autocompletion can look like this (e.g. VIM plugin):
|
||||
Autocompletion can for example look like this in jedi-vim:
|
||||
|
||||
.. figure:: _screenshots/screenshot_complete.png
|
||||
|
||||
@@ -18,16 +45,18 @@ Docs
|
||||
----
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:maxdepth: 1
|
||||
|
||||
docs/usage
|
||||
docs/installation
|
||||
docs/features
|
||||
docs/api
|
||||
docs/api-classes
|
||||
docs/installation
|
||||
docs/settings
|
||||
docs/development
|
||||
docs/testing
|
||||
docs/acknowledgements
|
||||
docs/changelog
|
||||
|
||||
|
||||
.. _resources:
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
"""
|
||||
Jedi is a static analysis tool for Python that can be used in IDEs/editors.
|
||||
Jedi has a focus on autocompletion and goto functionality. Jedi is fast and is
|
||||
very well tested. It understands Python and stubs on a deep level.
|
||||
Jedi is a static analysis tool for Python that is typically used in
|
||||
IDE/editor plugins. Jedi has a focus on autocompletion and goto
|
||||
functionality. Other features include refactoring, code search and finding
|
||||
references.
|
||||
|
||||
Jedi has support for different goto functions. It's possible to search for
|
||||
references and list names in a Python file to get information about them.
|
||||
|
||||
Jedi uses a very simple API to connect with IDE's. There's a reference
|
||||
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
|
||||
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
|
||||
Autocompletion in your REPL is also possible, IPython uses it natively and for
|
||||
the CPython REPL you have to install it.
|
||||
Jedi has a simple API to work with. There is a reference implementation as a
|
||||
`VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_. Autocompletion in your
|
||||
REPL is also possible, IPython uses it natively and for the CPython REPL you
|
||||
can install it. Jedi is well tested and bugs should be rare.
|
||||
|
||||
Here's a simple example of the autocompletion feature:
|
||||
|
||||
@@ -28,20 +25,19 @@ Here's a simple example of the autocompletion feature:
|
||||
ad
|
||||
>>> print(completions[0].name)
|
||||
load
|
||||
|
||||
As you see Jedi is pretty simple and allows you to concentrate on writing a
|
||||
good text editor, while still having very good IDE features for Python.
|
||||
"""
|
||||
|
||||
__version__ = '0.16.0'
|
||||
__version__ = '0.17.0'
|
||||
|
||||
from jedi.api import Script, Interpreter, set_debug_function, \
|
||||
preload_module, names
|
||||
from jedi import settings
|
||||
from jedi.api.environment import find_virtualenvs, find_system_environments, \
|
||||
get_default_environment, InvalidPythonEnvironment, create_environment, \
|
||||
get_system_environment
|
||||
from jedi.api.exceptions import InternalError
|
||||
get_system_environment, InterpreterEnvironment
|
||||
from jedi.api.project import Project, get_default_project
|
||||
from jedi.api.exceptions import InternalError, RefactoringError
|
||||
|
||||
# Finally load the internal plugins. This is only internal.
|
||||
from jedi.plugins import registry
|
||||
del registry
|
||||
|
||||
@@ -27,8 +27,8 @@ def _start_linter():
|
||||
paths = [path]
|
||||
|
||||
try:
|
||||
for path in paths:
|
||||
for error in jedi.Script(path=path)._analysis():
|
||||
for p in paths:
|
||||
for error in jedi.Script(path=p)._analysis():
|
||||
print(error)
|
||||
except Exception:
|
||||
if '--pdb' in sys.argv:
|
||||
@@ -40,9 +40,24 @@ def _start_linter():
|
||||
raise
|
||||
|
||||
|
||||
def _complete():
|
||||
import jedi
|
||||
import pdb
|
||||
|
||||
try:
|
||||
for c in jedi.Script(sys.argv[2]).complete():
|
||||
c.docstring()
|
||||
c.type
|
||||
except Exception as e:
|
||||
print(e)
|
||||
pdb.post_mortem()
|
||||
|
||||
|
||||
if len(sys.argv) == 2 and sys.argv[1] == 'repl':
|
||||
# don't want to use __main__ only for repl yet, maybe we want to use it for
|
||||
# something else. So just use the keyword ``repl`` for now.
|
||||
print(join(dirname(abspath(__file__)), 'api', 'replstartup.py'))
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == 'linter':
|
||||
_start_linter()
|
||||
elif len(sys.argv) > 1 and sys.argv[1] == '_complete':
|
||||
_complete()
|
||||
|
||||
@@ -11,7 +11,6 @@ import os
|
||||
import re
|
||||
import pkgutil
|
||||
import warnings
|
||||
import inspect
|
||||
import subprocess
|
||||
import weakref
|
||||
try:
|
||||
@@ -113,7 +112,12 @@ def find_module_py33(string, path=None, loader=None, full_name=None, is_global_s
|
||||
|
||||
|
||||
def _from_loader(loader, string):
|
||||
is_package = loader.is_package(string)
|
||||
try:
|
||||
is_package_method = loader.is_package
|
||||
except AttributeError:
|
||||
is_package = False
|
||||
else:
|
||||
is_package = is_package_method(string)
|
||||
try:
|
||||
get_filename = loader.get_filename
|
||||
except AttributeError:
|
||||
@@ -123,7 +127,11 @@ def _from_loader(loader, string):
|
||||
|
||||
# To avoid unicode and read bytes, "overwrite" loader.get_source if
|
||||
# possible.
|
||||
f = type(loader).get_source
|
||||
try:
|
||||
f = type(loader).get_source
|
||||
except AttributeError:
|
||||
raise ImportError("get_source was not defined on loader")
|
||||
|
||||
if is_py3 and f is not importlib.machinery.SourceFileLoader.get_source:
|
||||
# Unfortunately we are reading unicode here, not bytes.
|
||||
# It seems hard to get bytes, because the zip importer
|
||||
@@ -178,7 +186,6 @@ def find_module_pre_py3(string, path=None, full_name=None, is_global_search=True
|
||||
module_file = None
|
||||
|
||||
if module_file is None:
|
||||
code = None
|
||||
return None, is_package
|
||||
|
||||
with module_file:
|
||||
@@ -210,65 +217,6 @@ if the module is contained in a package.
|
||||
"""
|
||||
|
||||
|
||||
def _iter_modules(paths, prefix=''):
|
||||
# Copy of pkgutil.iter_modules adapted to work with namespaces
|
||||
|
||||
for path in paths:
|
||||
importer = pkgutil.get_importer(path)
|
||||
|
||||
if not isinstance(importer, importlib.machinery.FileFinder):
|
||||
# We're only modifying the case for FileFinder. All the other cases
|
||||
# still need to be checked (like zip-importing). Do this by just
|
||||
# calling the pkgutil version.
|
||||
for mod_info in pkgutil.iter_modules([path], prefix):
|
||||
yield mod_info
|
||||
continue
|
||||
|
||||
# START COPY OF pkutils._iter_file_finder_modules.
|
||||
if importer.path is None or not os.path.isdir(importer.path):
|
||||
return
|
||||
|
||||
yielded = {}
|
||||
|
||||
try:
|
||||
filenames = os.listdir(importer.path)
|
||||
except OSError:
|
||||
# ignore unreadable directories like import does
|
||||
filenames = []
|
||||
filenames.sort() # handle packages before same-named modules
|
||||
|
||||
for fn in filenames:
|
||||
modname = inspect.getmodulename(fn)
|
||||
if modname == '__init__' or modname in yielded:
|
||||
continue
|
||||
|
||||
# jedi addition: Avoid traversing special directories
|
||||
if fn.startswith('.') or fn == '__pycache__':
|
||||
continue
|
||||
|
||||
path = os.path.join(importer.path, fn)
|
||||
ispkg = False
|
||||
|
||||
if not modname and os.path.isdir(path) and '.' not in fn:
|
||||
modname = fn
|
||||
# A few jedi modifications: Don't check if there's an
|
||||
# __init__.py
|
||||
try:
|
||||
os.listdir(path)
|
||||
except OSError:
|
||||
# ignore unreadable directories like import does
|
||||
continue
|
||||
ispkg = True
|
||||
|
||||
if modname and '.' not in modname:
|
||||
yielded[modname] = 1
|
||||
yield importer, prefix + modname, ispkg
|
||||
# END COPY
|
||||
|
||||
|
||||
iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules
|
||||
|
||||
|
||||
class ImplicitNSInfo(object):
|
||||
"""Stores information returned from an implicit namespace spec"""
|
||||
def __init__(self, name, paths):
|
||||
@@ -433,64 +381,6 @@ try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
if sys.version_info[:2] == (3, 3):
|
||||
"""
|
||||
Monkeypatch the unpickler in Python 3.3. This is needed, because the
|
||||
argument `encoding='bytes'` is not supported in 3.3, but badly needed to
|
||||
communicate with Python 2.
|
||||
"""
|
||||
|
||||
class NewUnpickler(pickle._Unpickler):
|
||||
dispatch = dict(pickle._Unpickler.dispatch)
|
||||
|
||||
def _decode_string(self, value):
|
||||
# Used to allow strings from Python 2 to be decoded either as
|
||||
# bytes or Unicode strings. This should be used only with the
|
||||
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
|
||||
if self.encoding == "bytes":
|
||||
return value
|
||||
else:
|
||||
return value.decode(self.encoding, self.errors)
|
||||
|
||||
def load_string(self):
|
||||
data = self.readline()[:-1]
|
||||
# Strip outermost quotes
|
||||
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
|
||||
data = data[1:-1]
|
||||
else:
|
||||
raise pickle.UnpicklingError("the STRING opcode argument must be quoted")
|
||||
self.append(self._decode_string(pickle.codecs.escape_decode(data)[0]))
|
||||
dispatch[pickle.STRING[0]] = load_string
|
||||
|
||||
def load_binstring(self):
|
||||
# Deprecated BINSTRING uses signed 32-bit length
|
||||
len, = pickle.struct.unpack('<i', self.read(4))
|
||||
if len < 0:
|
||||
raise pickle.UnpicklingError("BINSTRING pickle has negative byte count")
|
||||
data = self.read(len)
|
||||
self.append(self._decode_string(data))
|
||||
dispatch[pickle.BINSTRING[0]] = load_binstring
|
||||
|
||||
def load_short_binstring(self):
|
||||
len = self.read(1)[0]
|
||||
data = self.read(len)
|
||||
self.append(self._decode_string(data))
|
||||
dispatch[pickle.SHORT_BINSTRING[0]] = load_short_binstring
|
||||
|
||||
def load(file, fix_imports=True, encoding="ASCII", errors="strict"):
|
||||
return NewUnpickler(file, fix_imports=fix_imports,
|
||||
encoding=encoding, errors=errors).load()
|
||||
|
||||
def loads(s, fix_imports=True, encoding="ASCII", errors="strict"):
|
||||
if isinstance(s, str):
|
||||
raise TypeError("Can't load pickle from unicode string")
|
||||
file = pickle.io.BytesIO(s)
|
||||
return NewUnpickler(file, fix_imports=fix_imports,
|
||||
encoding=encoding, errors=errors).load()
|
||||
|
||||
pickle.Unpickler = NewUnpickler
|
||||
pickle.load = load
|
||||
pickle.loads = loads
|
||||
|
||||
|
||||
def pickle_load(file):
|
||||
|
||||
@@ -6,12 +6,11 @@ Additionally you can add a debug function with :func:`set_debug_function`.
|
||||
Alternatively, if you don't need a custom function and are happy with printing
|
||||
debug messages to stdout, simply call :func:`set_debug_function` without
|
||||
arguments.
|
||||
|
||||
.. warning:: Please, note that Jedi is **not thread safe**.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from functools import wraps
|
||||
|
||||
import parso
|
||||
from parso.python import tree
|
||||
@@ -26,15 +25,18 @@ from jedi.api import classes
|
||||
from jedi.api import interpreter
|
||||
from jedi.api import helpers
|
||||
from jedi.api.helpers import validate_line_column
|
||||
from jedi.api.completion import Completion
|
||||
from jedi.api.completion import Completion, search_in_module
|
||||
from jedi.api.keywords import KeywordName
|
||||
from jedi.api.environment import InterpreterEnvironment
|
||||
from jedi.api.project import get_default_project, Project
|
||||
from jedi.api.errors import parso_to_jedi_errors
|
||||
from jedi.api import refactoring
|
||||
from jedi.api.refactoring.extract import extract_function, extract_variable
|
||||
from jedi.inference import InferenceState
|
||||
from jedi.inference import imports
|
||||
from jedi.inference.references import find_references
|
||||
from jedi.inference.arguments import try_iter_content
|
||||
from jedi.inference.helpers import get_module_names, infer_call_of_leaf
|
||||
from jedi.inference.helpers import infer_call_of_leaf
|
||||
from jedi.inference.sys_path import transform_path_to_dotted
|
||||
from jedi.inference.syntax_tree import tree_name_to_values
|
||||
from jedi.inference.value import ModuleValue
|
||||
@@ -42,82 +44,149 @@ from jedi.inference.base_value import ValueSet
|
||||
from jedi.inference.value.iterable import unpack_tuple_to_dict
|
||||
from jedi.inference.gradual.conversion import convert_names, convert_values
|
||||
from jedi.inference.gradual.utils import load_proper_stub_module
|
||||
from jedi.inference.utils import to_list
|
||||
|
||||
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
|
||||
# can remove some "maximum recursion depth" errors.
|
||||
sys.setrecursionlimit(3000)
|
||||
|
||||
|
||||
def _no_python2_support(func):
|
||||
# TODO remove when removing Python 2/3.5
|
||||
@wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
if self._inference_state.grammar.version_info < (3, 6) or sys.version_info < (3, 6):
|
||||
raise NotImplementedError(
|
||||
"No support for refactorings/search on Python 2/3.5"
|
||||
)
|
||||
return func(self, *args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
class Script(object):
|
||||
"""
|
||||
A Script is the base for completions, goto or whatever you want to do with
|
||||
|jedi|.
|
||||
Jedi. The counterpart of this class is :class:`Interpreter`, which works
|
||||
with actual dictionaries and can work with a REPL. This class
|
||||
should be used when a user edits code in an editor.
|
||||
|
||||
You can either use the ``source`` parameter or ``path`` to read a file.
|
||||
You can either use the ``code`` parameter or ``path`` to read a file.
|
||||
Usually you're going to want to use both of them (in an editor).
|
||||
|
||||
The script might be analyzed in a different ``sys.path`` than |jedi|:
|
||||
The Script's ``sys.path`` is very customizable:
|
||||
|
||||
- if `sys_path` parameter is not ``None``, it will be used as ``sys.path``
|
||||
for the script;
|
||||
- If `project` is provided with a ``sys_path``, that is going to be used.
|
||||
- If `environment` is provided, its ``sys.path`` will be used
|
||||
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
|
||||
- Otherwise ``sys.path`` will match that of the default environment of
|
||||
Jedi, which typically matches the sys path that was used at the time
|
||||
when Jedi was imported.
|
||||
|
||||
- if `sys_path` parameter is ``None`` and ``VIRTUAL_ENV`` environment
|
||||
variable is defined, ``sys.path`` for the specified environment will be
|
||||
guessed (see :func:`jedi.inference.sys_path.get_venv_path`) and used for
|
||||
the script;
|
||||
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
|
||||
always 1-based and columns are always zero-based. To avoid repetition they
|
||||
are not always documented. You can omit both line and column. Jedi will
|
||||
then just do whatever action you are calling at the end of the file. If you
|
||||
provide only the line, Jedi will complete at the end of that line.
|
||||
|
||||
- otherwise ``sys.path`` will match that of |jedi|.
|
||||
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
|
||||
that parso reuses modules (i.e. they are not immutable). With this setting
|
||||
Jedi is **not thread safe** and it is also not safe to use multiple
|
||||
:class:`.Script` instances and their definitions at the same time.
|
||||
|
||||
:param source: The source code of the current file, separated by newlines.
|
||||
:type source: str
|
||||
:param line: Deprecated, please use it directly on e.g. `.complete`
|
||||
If you are a normal plugin developer this should not be an issue. It is
|
||||
an issue for people that do more complex stuff with Jedi.
|
||||
|
||||
This is purely a performance optimization and works pretty well for all
|
||||
typical usages; however, consider turning the setting off if it causes
|
||||
you problems. See also
|
||||
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
|
||||
|
||||
:param code: The source code of the current file, separated by newlines.
|
||||
:type code: str
|
||||
:param line: Deprecated, please use it directly on e.g. ``.complete``
|
||||
:type line: int
|
||||
:param column: Deprecated, please use it directly on e.g. `.complete`
|
||||
:param column: Deprecated, please use it directly on e.g. ``.complete``
|
||||
:type column: int
|
||||
:param path: The path of the file in the file system, or ``''`` if
|
||||
it hasn't been saved yet.
|
||||
:type path: str or None
|
||||
:param encoding: The encoding of ``source``, if it is not a
|
||||
``unicode`` object (default ``'utf-8'``).
|
||||
:param encoding: Deprecated, cast to unicode yourself. The encoding of
|
||||
``code``, if it is not a ``unicode`` object (default ``'utf-8'``).
|
||||
:type encoding: str
|
||||
:param sys_path: ``sys.path`` to use during analysis of the script
|
||||
:type sys_path: list
|
||||
:param environment: TODO
|
||||
:type environment: Environment
|
||||
:param sys_path: Deprecated, use the project parameter.
|
||||
:type sys_path: typing.List[str]
|
||||
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
|
||||
to work with a specific Python version or virtualenv.
|
||||
:param Project project: Provide a :class:`.Project` to make sure finding
|
||||
references works well, because the right folder is searched. There are
|
||||
also ways to modify the sys path and other things.
|
||||
"""
|
||||
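As a hedged illustration of the parameters described above (the file name and code are placeholders only), a Script is typically built from code and an optional path:

import jedi

source = "import json\njson.lo"
# Prefer ``code``/``project``; ``source``, ``line``, ``column`` and ``sys_path`` are deprecated.
script = jedi.Script(code=source, path="example.py")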
def __init__(self, source=None, line=None, column=None, path=None,
encoding='utf-8', sys_path=None, environment=None,
_project=None):
def __init__(self, code=None, line=None, column=None, path=None,
encoding=None, sys_path=None, environment=None,
project=None, source=None):
self._orig_path = path
# An empty path (also empty string) should always result in no path.
self.path = os.path.abspath(path) if path else None

if source is None:
if encoding is None:
encoding = 'utf-8'
else:
warnings.warn(
"Deprecated since version 0.17.0. You should cast to valid "
"unicode yourself, especially if you are not using utf-8.",
DeprecationWarning,
stacklevel=2
)
if line is not None:
warnings.warn(
"Providing the line is now done in the functions themselves "
"like `Script(...).complete(line, column)`",
DeprecationWarning,
stacklevel=2
)
if column is not None:
warnings.warn(
"Providing the column is now done in the functions themselves "
"like `Script(...).complete(line, column)`",
DeprecationWarning,
stacklevel=2
)
if source is not None:
code = source
warnings.warn(
"Use the code keyword argument instead.",
DeprecationWarning,
stacklevel=2
)
if code is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
source = f.read()

# Load the Python grammar of the current interpreter.
self._grammar = parso.load_grammar()
code = f.read()

if sys_path is not None and not is_py3:
sys_path = list(map(force_unicode, sys_path))

project = _project
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(
os.path.dirname(self.path) if path else os.getcwd()
os.path.dirname(self.path) if path else None
)
# TODO deprecate and remove sys_path from the Script API.
if sys_path is not None:
project._sys_path = sys_path
warnings.warn(
"Deprecated since version 0.17.0. Use the project API instead, "
"which means Script(project=Project(dir, sys_path=sys_path)) instead.",
DeprecationWarning,
stacklevel=2
)

self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, source = self._inference_state.parse_and_get_code(
code=source,
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
encoding=encoding,
use_latest_grammar=path and path.endswith('.pyi'),
@@ -126,8 +195,8 @@ class Script(object):
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(source, keepends=True)
self._code = source
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
self._pos = line, column

cache.clear_time_caches()
@@ -191,13 +260,17 @@ class Script(object):
@validate_line_column
def complete(self, line=None, column=None, **kwargs):
"""
Return :class:`classes.Completion` objects. Those objects contain
information about the completions, more than just names.
Completes objects under the cursor.

Those objects contain information about the completions, more than just
names.

:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name and ``__`` comes last.
:rtype: list of :class:`classes.Completion`
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
return self._complete(line, column, **kwargs)

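A minimal usage sketch of the method above, assuming the ``script`` object from the earlier example; the positions are illustrative:

completions = script.complete(2, 7)
print([c.name for c in completions])        # e.g. ['load', 'loads', ...]
fuzzy = script.complete(2, 7, fuzzy=True)   # fuzzy matching, e.g. ``ooa`` matching ``foobar``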
@@ -210,30 +283,39 @@ class Script(object):
return completion.complete()

def completions(self, fuzzy=False):
# Deprecated, will be removed.
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).complete instead.",
DeprecationWarning,
stacklevel=2
)
return self.complete(*self._pos, fuzzy=fuzzy)

@validate_line_column
def infer(self, line=None, column=None, **kwargs):
"""
Return the definitions of a the path under the cursor. goto function!
This follows complicated paths and returns the end, not the first
definition. The big difference between :meth:`goto` and
Return the definitions under the cursor. It is basically a wrapper
around Jedi's type inference.

This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned,
because Python itself is a dynamic language, which means depending on
an option you can have two different versions of a function.
because depending on an option you can have two different versions of a
function.

:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this type
inference call.
:rtype: list of :class:`classes.Definition`
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
with debug.increase_indent_cm('infer'):
return self._infer(line, column, **kwargs)

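A short, hedged sketch of the contrast described above (code and positions are made up):

script = jedi.Script(code="import os\nos.path.join\n")
inferred = script.infer(2, 10)   # type inference: follows the name to its actual definition
gone_to = script.goto(2, 10)     # goto: stops earlier, does not follow imports and statements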
def goto_definitions(self, **kwargs):
# Deprecated, will be removed.
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).infer instead.",
DeprecationWarning,
stacklevel=2
)
return self.infer(*self._pos, **kwargs)

def _infer(self, line, column, only_stubs=False, prefer_stubs=False):
@@ -253,14 +335,18 @@ class Script(object):
prefer_stubs=prefer_stubs,
)

defs = [classes.Definition(self._inference_state, c.name) for c in values]
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))

def goto_assignments(self, follow_imports=False, follow_builtin_imports=False, **kwargs):
# Deprecated, will be removed.
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).goto instead.",
DeprecationWarning,
stacklevel=2
)
return self.goto(*self._pos,
follow_imports=follow_imports,
follow_builtin_imports=follow_builtin_imports,
@@ -269,41 +355,23 @@ class Script(object):
@validate_line_column
def goto(self, line=None, column=None, **kwargs):
"""
Return the first definition found, while optionally following imports.
Multiple objects may be returned, because Python itself is a
dynamic language, which means depending on an option you can have two
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
Multiple objects may be returned; depending on an ``if`` you can have two
different versions of a function.

:param follow_imports: The goto call will follow imports.
:param follow_builtin_imports: If follow_imports is True will decide if
it follow builtin imports.
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this goto call.
:rtype: list of :class:`classes.Definition`
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
with debug.increase_indent_cm('goto'):
return self._goto(line, column, **kwargs)

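An illustrative call of the method above, assuming an existing ``script`` instance:

names = script.goto(2, 10, follow_imports=True, follow_builtin_imports=True)
for n in names:
    print(n.module_path, n.line, n.column)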
def _goto(self, line, column, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
def filter_follow_imports(names):
for name in names:
if name.is_import():
new_names = list(filter_follow_imports(name.goto()))
found_builtin = False
if follow_builtin_imports:
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True

if found_builtin:
yield name
else:
for new_name in new_names:
yield new_name
else:
yield name

tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
@@ -328,54 +396,107 @@ class Script(object):
names = list(name.goto())

if follow_imports:
names = filter_follow_imports(names)
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)

defs = [classes.Definition(self._inference_state, d) for d in set(names)]
return helpers.sorted_definitions(defs)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))

@_no_python2_support
def search(self, string, **kwargs):
"""
Searches a name in the current file. For a description of how the
search string should look like, please have a look at
:meth:`.Project.search`.

:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search(string, **kwargs)  # Python 2 ...

def _search(self, string, all_scopes=False):
return self._search_func(string, all_scopes=all_scopes)

@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)

def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.

:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)

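A hedged sketch of both search methods above; the query strings are examples only and follow the format described for :meth:`.Project.search`:

for name in script.search('foo', all_scopes=True):
    print(name.description)
for completion in script.complete_search('fo', all_scopes=True):
    print(completion.name)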
@validate_line_column
def help(self, line=None, column=None):
"""
Works like goto and returns a list of Definition objects. Returns
additional definitions for keywords and operators.
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.

The additional definitions are of ``Definition(...).type == 'keyword'``.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.

The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's ``help()`` function.
attribute, which contains the output of Python's :func:`help` function.

:rtype: list of :class:`classes.Definition`
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf.type in ('keyword', 'operator', 'error_leaf'):
reserved = self._grammar._pgen_grammar.reserved_syntax_strings.keys()
reserved = self._inference_state.grammar._pgen_grammar.reserved_syntax_strings.keys()
if leaf.value in reserved:
name = KeywordName(self._inference_state, leaf.value)
return [classes.Definition(self._inference_state, name)]
return [classes.Name(self._inference_state, name)]
return []

def usages(self, **kwargs):
# Deprecated, will be removed.
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).get_references instead.",
DeprecationWarning,
stacklevel=2
)
return self.get_references(*self._pos, **kwargs)

@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Return :class:`classes.Definition` objects, which contain all
names that point to the definition of the name under the cursor. This
is very useful for refactoring (renaming), or to show all references of
a variable.
Lists all references of a variable in a project. Since this can be
quite hard to do for Jedi, if it is too complicated, Jedi will stop
searching.

:param include_builtins: Default True, checks if a reference is a
builtin (e.g. ``sys``) and in that case does not return it.
:rtype: list of :class:`classes.Definition`
:rtype: list of :class:`.Name`
"""

def _references(include_builtins=True):
@@ -386,20 +507,24 @@ class Script(object):

names = find_references(self._get_module_context(), tree_name)

definitions = [classes.Definition(self._inference_state, n) for n in names]
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins:
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)

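For illustration, assuming a ``script`` instance, references excluding builtin modules can be listed like this:

references = script.get_references(3, 4, include_builtins=False)
for r in references:
    print(r.module_path, r.line, r.column)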
def call_signatures(self):
# Deprecated, will be removed.
warnings.warn(
"Deprecated since version 0.16.0. Use Script(...).get_signatures instead.",
DeprecationWarning,
stacklevel=2
)
return self.get_signatures(*self._pos)

@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call you're currently in.
Return the function object of the call under the cursor.

E.g. if the cursor is here::

@@ -411,7 +536,7 @@ class Script(object):

This would return an empty list..

:rtype: list of :class:`classes.Signature`
:rtype: list of :class:`.Signature`
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
@@ -435,6 +560,12 @@ class Script(object):

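A small, hedged example of the call above; the code and cursor position are made up:

sig_script = jedi.Script(code="def add(a, b=0):\n    return a + b\nadd(1, ")
for signature in sig_script.get_signatures(3, 7):
    print(signature.to_string(), signature.index)   # e.g. "add(a, b=0)" and index 1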
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.

:rtype: :class:`.Name`
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
@@ -457,7 +588,7 @@ class Script(object):
while context.name is None:
context = context.parent_context  # comprehensions

definition = classes.Definition(self._inference_state, context.name)
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name  # TODO private access
tree_name = name.tree_name
@@ -504,66 +635,198 @@ class Script(object):

def get_names(self, **kwargs):
"""
Returns a list of `Definition` objects, containing name parts.
This means you can call ``Definition.goto()`` and get the
reference of a name.
Returns names defined in the current file.

:param all_scopes: If True lists the names of all scopes instead of only
the module namespace.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
"""
return self._names(**kwargs)  # Python 2...
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]

def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.

:rtype: list of :class:`.SyntaxError`
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)

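An illustrative sketch of the two methods above:

names_script = jedi.Script(code="import os\n\ndef f(x):\n    return os.getcwd()\n")
module_names = names_script.get_names()                        # module namespace only
all_names = names_script.get_names(all_scopes=True, references=True)
errors = names_script.get_syntax_errors()                      # [] for valid code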
def _names(self, all_scopes=False, definitions=True, references=False):
def def_ref_filter(_def):
is_def = _def._name.tree_name.is_definition()
return definitions and is_def or references and not is_def

# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
classes.Definition(
self._inference_state,
module_context.create_name(name)
) for name in get_module_names(self._module_node, all_scopes)
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(filter(def_ref_filter, defs), key=lambda x: (x.line, x.column))
return sorted(defs, key=lambda x: x.start_pos)

@_no_python2_support
def rename(self, line=None, column=None, **kwargs):
"""
Renames all references of the variable under the cursor.

:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._rename(line, column, **kwargs)

def _rename(self, line, column, new_name):  # Python 2...
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)

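Hedged sketch of a rename, assuming the returned :class:`.Refactoring` object offers ``get_diff()`` and ``apply()``:

refactoring = script.rename(3, 4, new_name='better_name')
print(refactoring.get_diff())   # inspect the change first
refactoring.apply()             # then write it to the files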
@_no_python2_support
def extract_variable(self, line, column, **kwargs):
"""
Moves an expression to a new statement.

For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::

foo = 3.1
x = int(foo + 1)

the code above will become::

foo = 3.1
bar = foo + 1
x = int(bar)

:param new_name: The expression under the cursor will be renamed to
this string.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will be clever and try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._extract_variable(line, column, **kwargs)  # Python 2...

@validate_line_column
def _extract_variable(self, line, column, new_name, until_line=None, until_column=None):
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)

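Reproducing the docstring example above as a hedged call (the positions are chosen to cover ``foo + 1``):

code = "foo = 3.1\nx = int(foo + 1)\n"
refactoring = jedi.Script(code=code).extract_variable(
    2, 8, new_name='bar', until_line=2, until_column=15)
print(refactoring.get_diff())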
@_no_python2_support
def extract_function(self, line, column, **kwargs):
"""
Moves an expression to a new function.

For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::

global_var = 3

def x():
foo = 3.1
x = int(foo + 1 + global_var)

the code above will become::

global_var = 3

def bar(foo):
return foo + 1 + global_var

def x(foo):
x = int(bar(foo))

:param new_name: The expression under the cursor will be replaced with
a function with this name.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will be clever and try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
return self._extract_function(line, column, **kwargs)  # Python 2...

@validate_line_column
def _extract_function(self, line, column, new_name, until_line=None, until_column=None):
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)

@_no_python2_support
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::

foo = 3.1
bar = foo + 1
x = int(bar)

the code above will become::

foo = 3.1
x = int(foo + 1)

:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)


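A hedged counterpart to the extraction examples above, inlining ``bar`` again:

code = "foo = 3.1\nbar = foo + 1\nx = int(bar)\n"
refactoring = jedi.Script(code=code).inline(2, 0)   # cursor on ``bar``
print(refactoring.get_diff())
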
class Interpreter(Script):
"""
Jedi API for Python REPLs.
Jedi's API for Python REPLs.

In addition to completion of simple attribute access, Jedi
supports code completion based on static code analysis.
Jedi can complete attributes of object which is not initialized
yet.
Implements all of the methods that are present in :class:`.Script` as well.

In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.

>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper

All keyword arguments are same as the arguments for :class:`.Script`.

:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True

def __init__(self, source, namespaces, **kwds):
"""
Parse `source` and mixin interpreted Python objects from `namespaces`.

:type source: str
:arg source: Code to parse.
:type namespaces: list of dict
:arg namespaces: a list of namespace dictionaries such as the one
returned by :func:`locals`.

Other optional arguments are same as the ones for :class:`Script`.
If `line` and `column` are None, they are assumed be at the end of
`source`.
"""
def __init__(self, code, namespaces, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
@@ -576,8 +839,8 @@ class Interpreter(Script):
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")

super(Interpreter, self).__init__(source, environment=environment,
_project=Project(os.getcwd()), **kwds)
super(Interpreter, self).__init__(code, environment=environment,
project=Project(os.getcwd()), **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default

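A minimal REPL-style sketch in addition to the doctest above; the namespace content is arbitrary:

import jedi

config = {'timeout': 10}
interp = jedi.Interpreter('config.ge', [locals()])
print([c.name for c in interp.complete()])   # e.g. ['get']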
@@ -613,7 +876,8 @@ def names(source=None, path=None, encoding='utf-8', all_scopes=False,
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. Usful for IDEs, to control which modules to load on startup.
of modules. This can be useful for IDEs, to control which modules to load
on startup.

:param modules: different module names, list of string.
"""
@@ -629,7 +893,7 @@ def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,

If you don't specify any arguments, debug messages will be printed to stdout.

:param func_cb: The callback function for debug messages, with n params.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings

@@ -1,7 +1,17 @@
"""
The :mod:`jedi.api.classes` module contains the return classes of the API.
These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
There are a couple of classes documented in here:

- :class:`.BaseName` as an abstract base class for almost everything.
- :class:`.Name` used in a lot of places
- :class:`.Completion` for completions
- :class:`.BaseSignature` as a base class for signatures
- :class:`.Signature` for :meth:`.Script.get_signatures` only
- :class:`.ParamName` used for parameters of signatures
- :class:`.Refactoring` for refactorings
- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only

These classes are the biggest part of the API, because they contain
the interesting information about all operations.
"""
import re
import sys
@@ -20,6 +30,7 @@ from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.base_value import ValueSet
from jedi.api.keywords import KeywordName
from jedi.api import completion_cache
from jedi.api.helpers import filter_follow_imports


def _sort_names_by_start_pos(names):
@@ -31,18 +42,21 @@ def defined_names(inference_state, context):
List sub-definitions (e.g., methods in class).

:type scope: Scope
:rtype: list of Definition
:rtype: list of Name
"""
filter = next(context.get_filters())
names = [name for name in filter.values()]
return [Definition(inference_state, n) for n in _sort_names_by_start_pos(names)]
return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]


def _values_to_definitions(values):
return [Definition(c.inference_state, c.name) for c in values]
return [Name(c.inference_state, c.name) for c in values]


class BaseDefinition(object):
class BaseName(object):
"""
The base class for all definitions, completions and signatures.
"""
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
@@ -137,10 +151,10 @@ class BaseDefinition(object):

>>> defs = sorted(defs, key=lambda d: d.line)
>>> no_unicode_pprint(defs)  # doctest: +NORMALIZE_WHITESPACE
[<Definition full_name='keyword', description='module keyword'>,
<Definition full_name='__main__.C', description='class C'>,
<Definition full_name='__main__.D', description='instance D'>,
<Definition full_name='__main__.f', description='def f'>]
[<Name full_name='keyword', description='module keyword'>,
<Name full_name='__main__.C', description='class C'>,
<Name full_name='__main__.D', description='instance D'>,
<Name full_name='__main__.f', description='def f'>]

Finally, here is what you can get from :attr:`type`:

@@ -155,7 +169,7 @@ class BaseDefinition(object):
'function'

Valid values are ``module``, ``class``, ``instance``, ``function``,
``param``, ``path`` and ``keyword``.
``param``, ``path``, ``keyword`` and ``statement``.

"""
tree_name = self._name.tree_name
@@ -175,7 +189,8 @@ class BaseDefinition(object):
@property
def module_name(self):
"""
The module name.
The module name, a bit similar to what ``__name__`` is in a random
Python module.

>>> from jedi import Script
>>> source = 'import json'
@@ -187,7 +202,9 @@ class BaseDefinition(object):
return self._get_module_context().py__name__()

def in_builtin_module(self):
"""Whether this is a builtin module."""
"""
Returns True, if this is a builtin module.
"""
value = self._get_module_context().get_value()
if isinstance(value, StubModuleValue):
return any(v.is_compiled() for v in value.non_stub_value_set)
@@ -264,7 +281,7 @@ class BaseDefinition(object):
@property
def description(self):
"""
A description of the :class:`.Definition` object, which is heavily used
A description of the :class:`.Name` object, which is heavily used
in testing. e.g. for ``isinstance`` it returns ``def isinstance``.

Example:
@@ -283,8 +300,8 @@ class BaseDefinition(object):
>>> defs = script.infer(column=3)
>>> defs = sorted(defs, key=lambda d: d.line)
>>> no_unicode_pprint(defs)  # doctest: +NORMALIZE_WHITESPACE
[<Definition full_name='__main__.f', description='def f'>,
<Definition full_name='__main__.C', description='class C'>]
[<Name full_name='__main__.f', description='def f'>,
<Name full_name='__main__.C', description='class C'>]
>>> str(defs[0].description)  # strip literals in python2
'def f'
>>> str(defs[1].description)
@@ -340,7 +357,7 @@ class BaseDefinition(object):

names = self._name.get_qualified_names(include_module_names=True)
if names is None:
return names
return None

names = list(names)
try:
@@ -351,12 +368,37 @@ class BaseDefinition(object):
return '.'.join(names)

|
||||
"""
|
||||
Returns True if the current name is defined in a stub file.
|
||||
"""
|
||||
if not self._name.is_value_name:
|
||||
return False
|
||||
|
||||
return self._name.get_root_context().is_stub()
|
||||
|
||||
def is_side_effect(self):
|
||||
"""
|
||||
Checks if a name is defined as ``self.foo = 3``. In case of self, this
|
||||
function would return False, for foo it would return True.
|
||||
"""
|
||||
tree_name = self._name.tree_name
|
||||
if tree_name is None:
|
||||
return False
|
||||
return tree_name.is_definition() and tree_name.parent.type == 'trailer'
|
||||
|
||||
def goto(self, **kwargs):
|
||||
"""
|
||||
Like :meth:`.Script.goto` (also supports the same params), but does it
|
||||
for the current name. This is typically useful if you are using
|
||||
something like :meth:`.Script.get_names()`.
|
||||
|
||||
:param follow_imports: The goto call will follow imports.
|
||||
:param follow_builtin_imports: If follow_imports is True will try to
|
||||
look up names in builtins (i.e. compiled or extension modules).
|
||||
:param only_stubs: Only return stubs for this goto call.
|
||||
:param prefer_stubs: Prefer stubs to Python objects for this goto call.
|
||||
:rtype: list of :class:`Name`
|
||||
"""
|
||||
with debug.increase_indent_cm('goto for %s' % self._name):
|
||||
return self._goto(**kwargs)
|
||||
|
||||
@@ -368,21 +410,40 @@ class BaseDefinition(object):
|
||||
)
|
||||
return self.goto(**kwargs)
|
||||
|
||||
def _goto(self, only_stubs=False, prefer_stubs=False):
|
||||
assert not (only_stubs and prefer_stubs)
|
||||
def _goto(self, follow_imports=False, follow_builtin_imports=False,
|
||||
only_stubs=False, prefer_stubs=False):
|
||||
|
||||
if not self._name.is_value_name:
|
||||
return []
|
||||
|
||||
names = self._name.goto()
|
||||
if follow_imports:
|
||||
names = filter_follow_imports(names, follow_builtin_imports)
|
||||
names = convert_names(
|
||||
self._name.goto(),
|
||||
names,
|
||||
only_stubs=only_stubs,
|
||||
prefer_stubs=prefer_stubs,
|
||||
)
|
||||
return [self if n == self._name else Definition(self._inference_state, n)
|
||||
return [self if n == self._name else Name(self._inference_state, n)
|
||||
for n in names]
|
||||
|
||||
def infer(self, **kwargs): # Python 2...
|
||||
"""
|
||||
Like :meth:`.Script.infer`, it can be useful to understand which type
|
||||
the current name has.
|
||||
|
||||
Return the actual definitions. I strongly recommend not using it for
|
||||
your completions, because it might slow down |jedi|. If you want to
|
||||
read only a few objects (<=20), it might be useful, especially to get
|
||||
the original docstrings. The basic problem of this function is that it
|
||||
follows all results. This means with 1000 completions (e.g. numpy),
|
||||
it's just very, very slow.
|
||||
|
||||
:param only_stubs: Only return stubs for this goto call.
|
||||
:param prefer_stubs: Prefer stubs to Python objects for this type
|
||||
inference call.
|
||||
:rtype: list of :class:`Name`
|
||||
"""
|
||||
with debug.increase_indent_cm('infer for %s' % self._name):
|
||||
return self._infer(**kwargs)
|
||||
|
||||
@@ -402,18 +463,12 @@ class BaseDefinition(object):
|
||||
prefer_stubs=prefer_stubs,
|
||||
)
|
||||
resulting_names = [c.name for c in values]
|
||||
return [self if n == self._name else Definition(self._inference_state, n)
|
||||
return [self if n == self._name else Name(self._inference_state, n)
|
||||
for n in resulting_names]
|
||||
|
||||
@property
|
||||
@memoize_method
|
||||
def params(self):
|
||||
"""
|
||||
Deprecated! Will raise a warning soon. Use get_signatures()[...].params.
|
||||
|
||||
Raises an ``AttributeError`` if the definition is not callable.
|
||||
Otherwise returns a list of `Definition` that represents the params.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Deprecated since version 0.16.0. Use get_signatures()[...].params",
|
||||
DeprecationWarning,
|
||||
@@ -423,7 +478,7 @@ class BaseDefinition(object):
|
||||
# with overloading.
|
||||
for signature in self._get_signatures():
|
||||
return [
|
||||
Definition(self._inference_state, n)
|
||||
Name(self._inference_state, n)
|
||||
for n in signature.get_param_names(resolve_stars=True)
|
||||
]
|
||||
|
||||
@@ -434,6 +489,11 @@ class BaseDefinition(object):
|
||||
raise AttributeError('There are no params defined on this.')
|
||||
|
||||
def parent(self):
|
||||
"""
|
||||
Returns the parent scope of this identifier.
|
||||
|
||||
:rtype: Name
|
||||
"""
|
||||
if not self._name.is_value_name:
|
||||
return None
|
||||
|
||||
@@ -459,7 +519,7 @@ class BaseDefinition(object):
|
||||
# Happens for comprehension contexts
|
||||
context = context.parent_context
|
||||
|
||||
return Definition(self._inference_state, context.name)
|
||||
return Name(self._inference_state, context.name)
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s %sname=%r, description=%r>" % (
|
||||
@@ -501,18 +561,42 @@ class BaseDefinition(object):
|
||||
return [sig for name in names for sig in name.infer().get_signatures()]
|
||||
|
||||
def get_signatures(self):
|
||||
"""
|
||||
Returns all potential signatures for a function or a class. Multiple
|
||||
signatures are typical if you use Python stubs with ``@overload``.
|
||||
|
||||
:rtype: list of :class:`BaseSignature`
|
||||
"""
|
||||
return [
|
||||
BaseSignature(self._inference_state, s)
|
||||
for s in self._get_signatures()
|
||||
]
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Uses type inference to "execute" this identifier and returns the
|
||||
executed objects.
|
||||
|
||||
:rtype: list of :class:`Name`
|
||||
"""
|
||||
return _values_to_definitions(self._name.infer().execute_with_values())
|
||||
|
||||
def get_type_hint(self):
|
||||
"""
|
||||
Returns type hints like ``Iterable[int]`` or ``Union[int, str]``.
|
||||
|
||||
class Completion(BaseDefinition):
|
||||
This method might be quite slow, especially for functions. The problem
|
||||
is finding executions for those functions to return something like
|
||||
``Callable[[int, str], str]``.
|
||||
|
||||
:rtype: str
|
||||
"""
|
||||
return self._name.infer().get_type_hint()
|
||||
|
||||
|
||||
class Completion(BaseName):
|
||||
"""
|
||||
`Completion` objects are returned from :meth:`api.Script.complete`. They
|
||||
``Completion`` objects are returned from :meth:`.Script.complete`. They
|
||||
provide additional information about a completion.
|
||||
"""
|
||||
def __init__(self, inference_state, name, stack, like_name_length,
|
||||
@@ -550,15 +634,15 @@ class Completion(BaseDefinition):
|
||||
isinstan# <-- Cursor is here
|
||||
|
||||
would return the string 'ce'. It also adds additional stuff, depending
|
||||
on your `settings.py`.
|
||||
on your ``settings.py``.
|
||||
|
||||
Assuming the following function definition::
|
||||
|
||||
def foo(param=0):
|
||||
pass
|
||||
|
||||
completing ``foo(par`` would give a ``Completion`` which `complete`
|
||||
would be `am=`
|
||||
completing ``foo(par`` would give a ``Completion`` which ``complete``
|
||||
would be ``am=``.
|
||||
"""
|
||||
if self._is_fuzzy:
|
||||
return None
|
||||
@@ -567,7 +651,7 @@ class Completion(BaseDefinition):
|
||||
@property
|
||||
def name_with_symbols(self):
|
||||
"""
|
||||
Similar to :attr:`name`, but like :attr:`name` returns also the
|
||||
Similar to :attr:`.name`, but like :attr:`.name` returns also the
|
||||
symbols, for example assuming the following function definition::
|
||||
|
||||
def foo(param=0):
|
||||
@@ -580,6 +664,9 @@ class Completion(BaseDefinition):
|
||||
return self._complete(False)
|
||||
|
||||
def docstring(self, raw=False, fast=True):
|
||||
"""
|
||||
Documentated under :meth:`BaseName.docstring`.
|
||||
"""
|
||||
if self._like_name_length >= 3:
|
||||
# In this case we can just resolve the like name, because we
|
||||
# wouldn't load like > 100 Python modules anymore.
|
||||
@@ -615,6 +702,9 @@ class Completion(BaseDefinition):
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
"""
|
||||
Documentated under :meth:`BaseName.type`.
|
||||
"""
|
||||
# Purely a speed optimization.
|
||||
if self._cached_name is not None:
|
||||
return completion_cache.get_type(
|
||||
@@ -628,45 +718,22 @@ class Completion(BaseDefinition):
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
|
||||
|
||||
@memoize_method
|
||||
def follow_definition(self):
|
||||
"""
|
||||
Deprecated!
|
||||
|
||||
Return the original definitions. I strongly recommend not using it for
|
||||
your completions, because it might slow down |jedi|. If you want to
|
||||
read only a few objects (<=20), it might be useful, especially to get
|
||||
the original docstrings. The basic problem of this function is that it
|
||||
follows all results. This means with 1000 completions (e.g. numpy),
|
||||
it's just PITA-slow.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Deprecated since version 0.14.0. Use .infer.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
return self.infer()
|
||||
|
||||
|
||||
class Definition(BaseDefinition):
|
||||
class Name(BaseName):
|
||||
"""
|
||||
*Definition* objects are returned from :meth:`api.Script.goto`
|
||||
or :meth:`api.Script.infer`.
|
||||
*Name* objects are returned from many different APIs including
|
||||
:meth:`.Script.goto` or :meth:`.Script.infer`.
|
||||
"""
|
||||
def __init__(self, inference_state, definition):
|
||||
super(Definition, self).__init__(inference_state, definition)
|
||||
super(Name, self).__init__(inference_state, definition)
|
||||
|
||||
@property
|
||||
def desc_with_module(self):
|
||||
"""
|
||||
In addition to the definition, also return the module.
|
||||
|
||||
.. warning:: Don't use this function yet, its behaviour may change. If
|
||||
you really need it, talk to me.
|
||||
|
||||
.. todo:: Add full path. This function is should return a
|
||||
`module.class.function` path.
|
||||
"""
|
||||
warnings.warn(
|
||||
"Deprecated since version 0.17.0. No replacement for now, maybe .full_name helps",
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
position = '' if self.in_builtin_module else '@%s' % self.line
|
||||
return "%s:%s%s" % (self.module_name, self.description, position)
|
||||
|
||||
@@ -675,7 +742,7 @@ class Definition(BaseDefinition):
|
||||
"""
|
||||
List sub-definitions (e.g., methods in class).
|
||||
|
||||
:rtype: list of Definition
|
||||
:rtype: list of :class:`Name`
|
||||
"""
|
||||
defs = self._name.infer()
|
||||
return sorted(
|
||||
@@ -706,11 +773,10 @@ class Definition(BaseDefinition):
|
||||
return hash((self._name.start_pos, self.module_path, self.name, self._inference_state))
|
||||
|
||||
|
||||
class BaseSignature(Definition):
|
||||
class BaseSignature(Name):
|
||||
"""
|
||||
`BaseSignature` objects is the return value of `Script.function_definition`.
|
||||
It knows what functions you are currently in. e.g. `isinstance(` would
|
||||
return the `isinstance` function. without `(` it would return nothing.
|
||||
These signatures are returned by :meth:`BaseName.get_signatures`
|
||||
calls.
|
||||
"""
|
||||
def __init__(self, inference_state, signature):
|
||||
super(BaseSignature, self).__init__(inference_state, signature.name)
|
||||
@@ -719,21 +785,28 @@ class BaseSignature(Definition):
|
||||
@property
|
||||
def params(self):
|
||||
"""
|
||||
:return list of ParamDefinition:
|
||||
Returns definitions for all parameters that a signature defines.
|
||||
This includes stuff like ``*args`` and ``**kwargs``.
|
||||
|
||||
:rtype: list of :class:`.ParamName`
|
||||
"""
|
||||
return [ParamDefinition(self._inference_state, n)
|
||||
return [ParamName(self._inference_state, n)
|
||||
for n in self._signature.get_param_names(resolve_stars=True)]
|
||||
|
||||
def to_string(self):
|
||||
"""
|
||||
Returns a text representation of the signature. This could for example
|
||||
look like ``foo(bar, baz: int, **kwargs)``.
|
||||
|
||||
:return str
|
||||
"""
|
||||
return self._signature.to_string()
|
||||
|
||||
|
||||
class Signature(BaseSignature):
|
||||
"""
|
||||
`Signature` objects is the return value of `Script.get_signatures`.
|
||||
It knows what functions you are currently in. e.g. `isinstance(` would
|
||||
return the `isinstance` function with its params. Without `(` it would
|
||||
return nothing.
|
||||
A full signature object is the return value of
|
||||
:meth:`.Script.get_signatures`.
|
||||
"""
|
||||
def __init__(self, inference_state, signature, call_details):
|
||||
super(Signature, self).__init__(inference_state, signature)
|
||||
@@ -743,8 +816,10 @@ class Signature(BaseSignature):
|
||||
@property
|
||||
def index(self):
|
||||
"""
|
||||
The Param index of the current call.
|
||||
Returns the param index of the current cursor position.
|
||||
Returns None if the index cannot be found in the curent call.
|
||||
|
||||
:rtype: int
|
||||
"""
|
||||
return self._call_details.calculate_index(
|
||||
self._signature.get_param_names(resolve_stars=True)
|
||||
@@ -753,8 +828,10 @@ class Signature(BaseSignature):
|
||||
@property
|
||||
def bracket_start(self):
|
||||
"""
|
||||
The line/column of the bracket that is responsible for the last
|
||||
function call.
|
||||
Returns a line/column tuple of the bracket that is responsible for the
|
||||
last function call. The first line is 1 and the first column 0.
|
||||
|
||||
:rtype: int, int
|
||||
"""
|
||||
return self._call_details.bracket_leaf.start_pos
|
||||
|
||||
@@ -766,32 +843,38 @@ class Signature(BaseSignature):
|
||||
)
|
||||
|
||||
|
||||
class ParamDefinition(Definition):
|
||||
class ParamName(Name):
|
||||
def infer_default(self):
|
||||
"""
|
||||
:return list of Definition:
|
||||
Returns default values like the ``1`` of ``def foo(x=1):``.
|
||||
|
||||
:rtype: list of :class:`.Name`
|
||||
"""
|
||||
return _values_to_definitions(self._name.infer_default())
|
||||
|
||||
def infer_annotation(self, **kwargs):
|
||||
"""
|
||||
:return list of Definition:
|
||||
|
||||
:param execute_annotation: If False, the values are not executed and
|
||||
you get classes instead of instances.
|
||||
:param execute_annotation: Default True; If False, values are not
|
||||
executed and classes are returned instead of instances.
|
||||
:rtype: list of :class:`.Name`
|
||||
"""
|
||||
return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))
|
||||
|
||||
def to_string(self):
|
||||
"""
|
||||
Returns a simple representation of a param, like
|
||||
``f: Callable[..., Any]``.
|
||||
|
||||
:rtype: :class:`str`
|
||||
"""
|
||||
return self._name.to_string()
|
||||
|
||||
@property
|
||||
def kind(self):
|
||||
"""
|
||||
Returns an enum instance. Returns the same values as the builtin
|
||||
:py:attr:`inspect.Parameter.kind`.
|
||||
Returns an enum instance of :mod:`inspect`'s ``Parameter`` enum.
|
||||
|
||||
No support for Python < 3.4 anymore.
|
||||
:rtype: :py:attr:`inspect.Parameter.kind`
|
||||
"""
|
||||
if sys.version_info < (3, 5):
|
||||
raise NotImplementedError(
|
||||
|
||||
@@ -19,8 +19,8 @@ from jedi.inference.base_value import ValueSet
|
||||
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
|
||||
from jedi.inference.context import get_global_filters
|
||||
from jedi.inference.value import TreeInstance, ModuleValue
|
||||
from jedi.inference.names import ParamNameWrapper
|
||||
from jedi.inference.gradual.conversion import convert_values
|
||||
from jedi.inference.names import ParamNameWrapper, SubModuleName
|
||||
from jedi.inference.gradual.conversion import convert_values, convert_names
|
||||
from jedi.parser_utils import cut_value_at_position
|
||||
from jedi.plugins import plugin_manager
|
||||
|
||||
@@ -30,29 +30,52 @@ class ParamNameWithEquals(ParamNameWrapper):
|
||||
return self.string_name + '='
|
||||
|
||||
|
||||
def get_signature_param_names(signatures):
|
||||
# add named params
|
||||
def _get_signature_param_names(signatures, positional_count, used_kwargs):
|
||||
# Add named params
|
||||
for call_sig in signatures:
|
||||
for p in call_sig.params:
|
||||
for i, p in enumerate(call_sig.params):
|
||||
# Allow protected access, because it's a public API.
|
||||
if p._name.get_kind() in (Parameter.POSITIONAL_OR_KEYWORD,
|
||||
Parameter.KEYWORD_ONLY):
|
||||
# TODO reconsider with Python 2 drop
|
||||
kind = p._name.get_kind()
|
||||
if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
|
||||
continue
|
||||
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
|
||||
and p.name not in used_kwargs:
|
||||
yield ParamNameWithEquals(p._name)
|
||||
|
||||
|
||||
def _must_be_kwarg(signatures, positional_count, used_kwargs):
|
||||
if used_kwargs:
|
||||
return True
|
||||
|
||||
must_be_kwarg = True
|
||||
for signature in signatures:
|
||||
for i, p in enumerate(signature.params):
|
||||
# TODO reconsider with Python 2 drop
|
||||
kind = p._name.get_kind()
|
||||
if kind is Parameter.VAR_POSITIONAL:
|
||||
# In case there were not already kwargs, the next param can
|
||||
# always be a normal argument.
|
||||
return False
|
||||
|
||||
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
|
||||
Parameter.POSITIONAL_ONLY):
|
||||
must_be_kwarg = False
|
||||
break
|
||||
if not must_be_kwarg:
|
||||
break
|
||||
return must_be_kwarg
|
||||
|
||||
|
||||
def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
|
||||
comp_dct = {}
|
||||
comp_dct = set()
|
||||
if settings.case_insensitive_completion:
|
||||
like_name = like_name.lower()
|
||||
for name in completion_names:
|
||||
string = name.string_name
|
||||
if settings.case_insensitive_completion:
|
||||
string = string.lower()
|
||||
if fuzzy:
|
||||
match = helpers.fuzzy_match(string, like_name)
|
||||
else:
|
||||
match = helpers.start_match(string, like_name)
|
||||
if match:
|
||||
if helpers.match(string, like_name, fuzzy=fuzzy):
|
||||
new = classes.Completion(
|
||||
inference_state,
|
||||
name,
|
||||
@@ -62,10 +85,13 @@ def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cac
|
||||
cached_name=cached_name,
|
||||
)
|
||||
k = (new.name, new.complete) # key
|
||||
if k in comp_dct and settings.no_completion_duplicates:
|
||||
comp_dct[k]._same_name_completions.append(new)
|
||||
else:
|
||||
comp_dct[k] = new
|
||||
if k not in comp_dct:
|
||||
comp_dct.add(k)
|
||||
tree_name = name.tree_name
|
||||
if tree_name is not None:
|
||||
definition = tree_name.get_definition()
|
||||
if definition is not None and definition.type == 'del_stmt':
|
||||
continue
|
||||
yield new
|
||||
|
||||
|
||||
@@ -132,7 +158,7 @@ class Completion:

if string is not None and not prefixed_completions:
prefixed_completions = list(complete_file_name(
self._inference_state, self._module_context, start_leaf, string,
self._inference_state, self._module_context, start_leaf, quote, string,
self._like_name, self._signatures_callback,
self._code_lines, self._original_position,
self._fuzzy
@@ -232,12 +258,7 @@ class Completion:
completion_names = []
current_line = self._code_lines[self._position[0] - 1][:self._position[1]]

completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)

kwargs_only = False
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
PythonTokenTypes.INDENT)):
# This means that we actually have to do type inference.
@@ -265,20 +286,40 @@ class Completion:
elif self._is_parameter_completion():
completion_names += self._complete_params(leaf)
else:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)
# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
#    with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
#    optional arglist in them.
if nodes[-1] in ['(', ','] \
and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
if signatures:
call_details = signatures[0]._call_details
used_kwargs = list(call_details.iter_used_keyword_arguments())
positional_count = call_details.count_positional_arguments()

# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
#    with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
#    optional arglist in them.
if nodes[-1] in ['(', ','] and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
completion_names += get_signature_param_names(signatures)
completion_names += _get_signature_param_names(
signatures,
positional_count,
used_kwargs,
)

kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)

if not kwargs_only:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)

if not kwargs_only:
completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)

return cached_name, completion_names

@@ -358,80 +399,7 @@ class Completion:
|
||||
def _complete_trailer_for_values(self, values):
|
||||
user_context = get_user_context(self._module_context, self._position)
|
||||
|
||||
completion_names = []
|
||||
for value in values:
|
||||
for filter in value.get_filters(origin_scope=user_context.tree_node):
|
||||
completion_names += filter.values()
|
||||
|
||||
if not value.is_stub() and isinstance(value, TreeInstance):
|
||||
completion_names += self._complete_getattr(value)
|
||||
|
||||
python_values = convert_values(values)
|
||||
for c in python_values:
|
||||
if c not in values:
|
||||
for filter in c.get_filters(origin_scope=user_context.tree_node):
|
||||
completion_names += filter.values()
|
||||
return completion_names
|
||||
|
||||
def _complete_getattr(self, instance):
|
||||
"""
|
||||
A heuristic to make completion for proxy objects work. This is not
|
||||
intended to work in all cases. It works exactly in this case:
|
||||
|
||||
def __getattr__(self, name):
|
||||
...
|
||||
return getattr(any_object, name)
|
||||
|
||||
It is important that the return contains getattr directly, otherwise it
|
||||
won't work anymore. It's really just a stupid heuristic. It will not
|
||||
work if you write e.g. `return (getattr(o, name))`, because of the
|
||||
additional parentheses. It will also not work if you move the getattr
|
||||
to some other place that is not the return statement itself.
|
||||
|
||||
It is intentional that it doesn't work in all cases. Generally it's
|
||||
really hard to do even this case (as you can see below). Most people
|
||||
will write it like this anyway and the other ones, well they are just
|
||||
out of luck I guess :) ~dave.
|
||||
"""
|
||||
names = (instance.get_function_slot_names(u'__getattr__')
|
||||
or instance.get_function_slot_names(u'__getattribute__'))
|
||||
functions = ValueSet.from_sets(
|
||||
name.infer()
|
||||
for name in names
|
||||
)
|
||||
for func in functions:
|
||||
tree_node = func.tree_node
|
||||
for return_stmt in tree_node.iter_return_stmts():
|
||||
# Basically until the next comment we just try to find out if a
|
||||
# return statement looks exactly like `return getattr(x, name)`.
|
||||
if return_stmt.type != 'return_stmt':
|
||||
continue
|
||||
atom_expr = return_stmt.children[1]
|
||||
if atom_expr.type != 'atom_expr':
|
||||
continue
|
||||
atom = atom_expr.children[0]
|
||||
trailer = atom_expr.children[1]
|
||||
if len(atom_expr.children) != 2 or atom.type != 'name' \
|
||||
or atom.value != 'getattr':
|
||||
continue
|
||||
arglist = trailer.children[1]
|
||||
if arglist.type != 'arglist' or len(arglist.children) < 3:
|
||||
continue
|
||||
context = func.as_context()
|
||||
object_node = arglist.children[0]
|
||||
|
||||
# Make sure it's a param: foo in __getattr__(self, foo)
|
||||
name_node = arglist.children[2]
|
||||
name_list = context.goto(name_node, name_node.start_pos)
|
||||
if not any(n.api_type == 'param' for n in name_list):
|
||||
continue
|
||||
|
||||
# Now that we know that these are most probably completion
|
||||
# objects, we just infer the object and return them as
|
||||
# completions.
|
||||
objects = context.infer_node(object_node)
|
||||
return self._complete_trailer_for_values(objects)
|
||||
return []
|
||||
return complete_trailer(user_context, values)
|
||||
|
||||
def _get_importer_names(self, names, level=0, only_modules=True):
|
||||
names = [n.value for n in names]
|
||||
@@ -531,6 +499,8 @@ def _extract_string_while_in_string(leaf, position):
|
||||
if leaf.line == position[0]:
|
||||
kwargs['endpos'] = position[1] - leaf.column
|
||||
match = _string_start.match(leaf.value, **kwargs)
|
||||
if not match:
|
||||
return None, None, None
|
||||
start = match.group(0)
|
||||
if leaf.line == position[0] and position[1] < leaf.column + match.end():
|
||||
return None, None, None
|
||||
@@ -569,3 +539,123 @@ def _extract_string_while_in_string(leaf, position):
|
||||
leaves.insert(0, leaf)
|
||||
leaf = leaf.get_previous_leaf()
|
||||
return None, None, None
|
||||
|
||||
|
||||
def complete_trailer(user_context, values):
|
||||
completion_names = []
|
||||
for value in values:
|
||||
for filter in value.get_filters(origin_scope=user_context.tree_node):
|
||||
completion_names += filter.values()
|
||||
|
||||
if not value.is_stub() and isinstance(value, TreeInstance):
|
||||
completion_names += _complete_getattr(user_context, value)
|
||||
|
||||
python_values = convert_values(values)
|
||||
for c in python_values:
|
||||
if c not in values:
|
||||
for filter in c.get_filters(origin_scope=user_context.tree_node):
|
||||
completion_names += filter.values()
|
||||
return completion_names
|
||||
|
||||
|
||||
def _complete_getattr(user_context, instance):
|
||||
"""
|
||||
A heuristic to make completion for proxy objects work. This is not
|
||||
intended to work in all cases. It works exactly in this case:
|
||||
|
||||
def __getattr__(self, name):
|
||||
...
|
||||
return getattr(any_object, name)
|
||||
|
||||
It is important that the return contains getattr directly, otherwise it
|
||||
won't work anymore. It's really just a stupid heuristic. It will not
|
||||
work if you write e.g. `return (getattr(o, name))`, because of the
|
||||
additional parentheses. It will also not work if you move the getattr
|
||||
to some other place that is not the return statement itself.
|
||||
|
||||
It is intentional that it doesn't work in all cases. Generally it's
|
||||
really hard to do even this case (as you can see below). Most people
|
||||
will write it like this anyway and the other ones, well they are just
|
||||
out of luck I guess :) ~dave.
|
||||
"""
|
||||
names = (instance.get_function_slot_names(u'__getattr__')
|
||||
or instance.get_function_slot_names(u'__getattribute__'))
|
||||
functions = ValueSet.from_sets(
|
||||
name.infer()
|
||||
for name in names
|
||||
)
|
||||
for func in functions:
|
||||
tree_node = func.tree_node
|
||||
for return_stmt in tree_node.iter_return_stmts():
|
||||
# Basically until the next comment we just try to find out if a
|
||||
# return statement looks exactly like `return getattr(x, name)`.
|
||||
if return_stmt.type != 'return_stmt':
|
||||
continue
|
||||
atom_expr = return_stmt.children[1]
|
||||
if atom_expr.type != 'atom_expr':
|
||||
continue
|
||||
atom = atom_expr.children[0]
|
||||
trailer = atom_expr.children[1]
|
||||
if len(atom_expr.children) != 2 or atom.type != 'name' \
|
||||
or atom.value != 'getattr':
|
||||
continue
|
||||
arglist = trailer.children[1]
|
||||
if arglist.type != 'arglist' or len(arglist.children) < 3:
|
||||
continue
|
||||
context = func.as_context()
|
||||
object_node = arglist.children[0]
|
||||
|
||||
# Make sure it's a param: foo in __getattr__(self, foo)
|
||||
name_node = arglist.children[2]
|
||||
name_list = context.goto(name_node, name_node.start_pos)
|
||||
if not any(n.api_type == 'param' for n in name_list):
|
||||
continue
|
||||
|
||||
# Now that we know that these are most probably completion
|
||||
# objects, we just infer the object and return them as
|
||||
# completions.
|
||||
objects = context.infer_node(object_node)
|
||||
return complete_trailer(user_context, objects)
|
||||
return []
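# Illustrative sketch (not from this changeset): the shape of proxy class the
# heuristic above is able to handle. `Proxy` and `wrapped` are hypothetical
# names; the important part is that the return statement contains getattr()
# directly and its second argument is the __getattr__ parameter.
#
#     class Proxy(object):
#         def __init__(self, wrapped):
#             self._wrapped = wrapped
#
#         def __getattr__(self, name):
#             return getattr(self._wrapped, name)
#
#     Proxy('some string').up
#     # Completion after the dot can now offer str attributes such as `upper`,
#     # because `self._wrapped` is inferred and completed instead.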
|
||||
|
||||
|
||||
def search_in_module(inference_state, module_context, names, wanted_names,
|
||||
wanted_type, complete=False, fuzzy=False,
|
||||
ignore_imports=False, convert=False):
|
||||
for s in wanted_names[:-1]:
|
||||
new_names = []
|
||||
for n in names:
|
||||
if s == n.string_name:
|
||||
if n.tree_name is not None and n.api_type == 'module' \
|
||||
and ignore_imports:
|
||||
continue
|
||||
new_names += complete_trailer(
|
||||
module_context,
|
||||
n.infer()
|
||||
)
|
||||
debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
|
||||
names = new_names
|
||||
|
||||
last_name = wanted_names[-1].lower()
|
||||
for n in names:
|
||||
string = n.string_name.lower()
|
||||
if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
|
||||
or not complete and string == last_name:
|
||||
if isinstance(n, SubModuleName):
|
||||
names = [v.name for v in n.infer()]
|
||||
else:
|
||||
names = [n]
|
||||
if convert:
|
||||
names = convert_names(names)
|
||||
for n2 in names:
|
||||
if complete:
|
||||
def_ = classes.Completion(
|
||||
inference_state, n2,
|
||||
stack=None,
|
||||
like_name_length=len(last_name),
|
||||
is_fuzzy=fuzzy,
|
||||
)
|
||||
else:
|
||||
def_ = classes.Name(inference_state, n2)
|
||||
if not wanted_type or wanted_type == def_.type:
|
||||
yield def_
|
||||
|
||||
@@ -17,7 +17,7 @@ import parso
|
||||
|
||||
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
|
||||
|
||||
_SUPPORTED_PYTHONS = ['3.8', '3.7', '3.6', '3.5', '3.4', '2.7']
|
||||
_SUPPORTED_PYTHONS = ['3.8', '3.7', '3.6', '3.5', '2.7']
|
||||
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
|
||||
_CONDA_VAR = 'CONDA_PREFIX'
|
||||
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
|
||||
@@ -91,8 +91,8 @@ class Environment(_BaseEnvironment):
|
||||
"""
|
||||
self.version_info = _VersionInfo(*info[2])
|
||||
"""
|
||||
Like ``sys.version_info``. A tuple to show the current Environment's
|
||||
Python version.
|
||||
Like :data:`sys.version_info`: a tuple to show the current
|
||||
Environment's Python version.
|
||||
"""
|
||||
|
||||
# py2 sends bytes via pickle apparently?!
|
||||
@@ -117,7 +117,7 @@ class Environment(_BaseEnvironment):
|
||||
def get_sys_path(self):
|
||||
"""
|
||||
The sys path for this environment. Does not include potential
|
||||
modifications like ``sys.path.append``.
|
||||
modifications from e.g. appending to :data:`sys.path`.
|
||||
|
||||
:returns: list of str
|
||||
"""
|
||||
@@ -185,7 +185,7 @@ def get_default_environment():
|
||||
makes it possible to use as many new Python features as possible when using
|
||||
autocompletion and other functionality.
|
||||
|
||||
:returns: :class:`Environment`
|
||||
:returns: :class:`.Environment`
|
||||
"""
|
||||
virtual_env = _get_virtual_env_from_var()
|
||||
if virtual_env is not None:
|
||||
@@ -254,7 +254,14 @@ def get_cached_default_environment():
|
||||
|
||||
@time_cache(seconds=10 * 60) # 10 Minutes
|
||||
def _get_cached_default_environment():
|
||||
return get_default_environment()
|
||||
try:
|
||||
return get_default_environment()
|
||||
except InvalidPythonEnvironment:
|
||||
# It's possible that `sys.executable` is wrong. Typically happens
|
||||
# when Jedi is used in an executable that embeds Python. For further
|
||||
# information, have a look at:
|
||||
# https://github.com/davidhalter/jedi/issues/1531
|
||||
return InterpreterEnvironment()
|
||||
|
||||
|
||||
def find_virtualenvs(paths=None, **kwargs):
|
||||
@@ -272,7 +279,7 @@ def find_virtualenvs(paths=None, **kwargs):
|
||||
CONDA_PREFIX will be checked to see if it contains a valid conda
|
||||
environment.
|
||||
|
||||
:yields: :class:`Environment`
|
||||
:yields: :class:`.Environment`
|
||||
"""
|
||||
def py27_comp(paths=None, safe=True, use_environment_vars=True):
|
||||
if paths is None:
|
||||
@@ -322,7 +329,7 @@ def find_system_environments():
|
||||
|
||||
The environments are sorted from latest to oldest Python version.
|
||||
|
||||
:yields: :class:`Environment`
|
||||
:yields: :class:`.Environment`
|
||||
"""
|
||||
for version_string in _SUPPORTED_PYTHONS:
|
||||
try:
|
||||
@@ -339,7 +346,7 @@ def get_system_environment(version):
|
||||
where X and Y are the major and minor versions of Python.
|
||||
|
||||
:raises: :exc:`.InvalidPythonEnvironment`
|
||||
:returns: :class:`Environment`
|
||||
:returns: :class:`.Environment`
|
||||
"""
|
||||
exe = which('python' + version)
|
||||
if exe:
|
||||
@@ -362,7 +369,7 @@ def create_environment(path, safe=True):
|
||||
Virtualenv path or an executable path.
|
||||
|
||||
:raises: :exc:`.InvalidPythonEnvironment`
|
||||
:returns: :class:`Environment`
|
||||
:returns: :class:`.Environment`
|
||||
"""
|
||||
if os.path.isfile(path):
|
||||
_assert_safe(path, safe)
|
||||
|
||||
43
jedi/api/errors.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
This file is about errors in Python files and not about exception handling in
|
||||
Jedi.
|
||||
"""
|
||||
|
||||
|
||||
def parso_to_jedi_errors(grammar, module_node):
|
||||
return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
|
||||
|
||||
|
||||
class SyntaxError(object):
|
||||
"""
|
||||
Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
|
||||
"""
|
||||
def __init__(self, parso_error):
|
||||
self._parso_error = parso_error
|
||||
|
||||
@property
|
||||
def line(self):
|
||||
"""The line where the error starts (starting with 1)."""
|
||||
return self._parso_error.start_pos[0]
|
||||
|
||||
@property
|
||||
def column(self):
|
||||
"""The column where the error starts (starting with 0)."""
|
||||
return self._parso_error.start_pos[1]
|
||||
|
||||
@property
|
||||
def until_line(self):
|
||||
"""The line where the error ends (starting with 1)."""
|
||||
return self._parso_error.end_pos[0]
|
||||
|
||||
@property
|
||||
def until_column(self):
|
||||
"""The column where the error ends (starting with 0)."""
|
||||
return self._parso_error.end_pos[1]
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s from=%s to=%s>' % (
|
||||
self.__class__.__name__,
|
||||
self._parso_error.start_pos,
|
||||
self._parso_error.end_pos,
|
||||
)
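# Illustrative usage sketch: these objects are produced through the public API
# (assuming a ``Script.get_syntax_errors()`` entry point, as referenced in the
# docstring above; ``example.py`` is a hypothetical file).
#
#     import jedi
#
#     for error in jedi.Script(path='example.py').get_syntax_errors():
#         print('%s:%s: error until %s:%s'
#               % (error.line, error.column, error.until_line, error.until_column))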
|
||||
@@ -3,8 +3,29 @@ class _JediError(Exception):
|
||||
|
||||
|
||||
class InternalError(_JediError):
|
||||
pass
|
||||
"""
|
||||
This error might happen if a subprocess is crashing. The reason for this is
|
||||
usually broken C code in third party libraries. This is not a very common
|
||||
thing and it is safe to use Jedi again. However, using the same calls might
|
||||
result in the same error again.
|
||||
"""
|
||||
|
||||
|
||||
class WrongVersion(_JediError):
|
||||
pass
|
||||
"""
|
||||
This error is reserved for the future; it shouldn't really be happening at the
|
||||
moment.
|
||||
"""
|
||||
|
||||
|
||||
class RefactoringError(_JediError):
|
||||
"""
|
||||
Refactorings can fail for various reasons. So if you work with refactorings
|
||||
like :meth:`.Script.rename`, :meth:`.Script.inline`,
|
||||
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make
|
||||
sure to catch these. The descriptions in the errors are usually valuable
|
||||
for end users.
|
||||
|
||||
A typical ``RefactoringError`` would tell the user that inlining is not
|
||||
possible if no name is under the cursor.
|
||||
"""
|
||||
|
||||
@@ -3,7 +3,7 @@ import os
|
||||
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
|
||||
from jedi.api import classes
|
||||
from jedi.api.strings import StringName, get_quote_ending
|
||||
from jedi.api.helpers import fuzzy_match, start_match
|
||||
from jedi.api.helpers import match
|
||||
from jedi.inference.helpers import get_str_or_none
|
||||
|
||||
|
||||
@@ -11,12 +11,14 @@ class PathName(StringName):
|
||||
api_type = u'path'
|
||||
|
||||
|
||||
def complete_file_name(inference_state, module_context, start_leaf, string,
|
||||
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
|
||||
like_name, signatures_callback, code_lines, position, fuzzy):
|
||||
# First we want to find out what can actually be changed as a name.
|
||||
like_name_length = len(os.path.basename(string))
|
||||
|
||||
addition = _get_string_additions(module_context, start_leaf)
|
||||
if string.startswith('~'):
|
||||
string = os.path.expanduser(string)
|
||||
if addition is None:
|
||||
return
|
||||
string = addition + string
|
||||
@@ -40,15 +42,12 @@ def complete_file_name(inference_state, module_context, start_leaf, string,
|
||||
# OSError: [Errno 36] File name too long: '...'
|
||||
except (FileNotFoundError, OSError):
|
||||
return
|
||||
quote_ending = get_quote_ending(quote, code_lines, position)
|
||||
for entry in listed:
|
||||
name = entry.name
|
||||
if fuzzy:
|
||||
match = fuzzy_match(name, must_start_with)
|
||||
else:
|
||||
match = start_match(name, must_start_with)
|
||||
if match:
|
||||
if match(name, must_start_with, fuzzy=fuzzy):
|
||||
if is_in_os_path_join or not entry.is_dir():
|
||||
name += get_quote_ending(start_leaf.value, code_lines, position)
|
||||
name += quote_ending
|
||||
else:
|
||||
name += os.path.sep
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ Helpers for the API
|
||||
import re
|
||||
from collections import namedtuple
|
||||
from textwrap import dedent
|
||||
from itertools import chain
|
||||
from functools import wraps
|
||||
|
||||
from parso.python.parser import Parser
|
||||
@@ -14,25 +15,33 @@ from jedi.inference.base_value import NO_VALUES
|
||||
from jedi.inference.syntax_tree import infer_atom
|
||||
from jedi.inference.helpers import infer_call_of_leaf
|
||||
from jedi.inference.compiled import get_string_value_set
|
||||
from jedi.cache import signature_time_cache
|
||||
from jedi.cache import signature_time_cache, memoize_method
|
||||
from jedi.parser_utils import get_parent_scope
|
||||
|
||||
|
||||
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
|
||||
|
||||
|
||||
def start_match(string, like_name):
|
||||
def _start_match(string, like_name):
|
||||
return string.startswith(like_name)
|
||||
|
||||
|
||||
def fuzzy_match(string, like_name):
|
||||
def _fuzzy_match(string, like_name):
|
||||
if len(like_name) <= 1:
|
||||
return like_name in string
|
||||
pos = string.find(like_name[0])
|
||||
if pos >= 0:
|
||||
return fuzzy_match(string[pos + 1:], like_name[1:])
|
||||
return _fuzzy_match(string[pos + 1:], like_name[1:])
|
||||
return False
|
||||
|
||||
|
||||
def match(string, like_name, fuzzy=False):
|
||||
if fuzzy:
|
||||
return _fuzzy_match(string, like_name)
|
||||
else:
|
||||
return _start_match(string, like_name)
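# A few concrete cases for the matching helpers above (comment only):
#
#     match('isinstance', 'isi')               # True  - plain prefix match
#     match('isinstance', 'ist')               # False - not a prefix
#     match('isinstance', 'ist', fuzzy=True)   # True  - 'i', 's', 't' appear in order
#     match('isinstance', 'x', fuzzy=True)     # False - single characters just use `in`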
|
||||
|
||||
|
||||
def sorted_definitions(defs):
|
||||
# Note: `or ''` below is required because `module_path` could be
|
||||
return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0, x.name))
|
||||
@@ -170,6 +179,28 @@ def infer(inference_state, context, leaf):
|
||||
return definitions
|
||||
|
||||
|
||||
def filter_follow_imports(names, follow_builtin_imports=False):
|
||||
for name in names:
|
||||
if name.is_import():
|
||||
new_names = list(filter_follow_imports(
|
||||
name.goto(),
|
||||
follow_builtin_imports=follow_builtin_imports,
|
||||
))
|
||||
found_builtin = False
|
||||
if follow_builtin_imports:
|
||||
for new_name in new_names:
|
||||
if new_name.start_pos is None:
|
||||
found_builtin = True
|
||||
|
||||
if found_builtin:
|
||||
yield name
|
||||
else:
|
||||
for new_name in new_names:
|
||||
yield new_name
|
||||
else:
|
||||
yield name
|
||||
|
||||
|
||||
class CallDetails(object):
|
||||
def __init__(self, bracket_leaf, children, position):
|
||||
['bracket_leaf', 'call_index', 'keyword_name_str']
|
||||
@@ -185,11 +216,15 @@ class CallDetails(object):
|
||||
def keyword_name_str(self):
|
||||
return _get_index_and_key(self._children, self._position)[1]
|
||||
|
||||
@memoize_method
|
||||
def _list_arguments(self):
|
||||
return list(_iter_arguments(self._children, self._position))
|
||||
|
||||
def calculate_index(self, param_names):
|
||||
positional_count = 0
|
||||
used_names = set()
|
||||
star_count = -1
|
||||
args = list(_iter_arguments(self._children, self._position))
|
||||
args = self._list_arguments()
|
||||
if not args:
|
||||
if param_names:
|
||||
return 0
|
||||
@@ -236,6 +271,19 @@ class CallDetails(object):
|
||||
return i
|
||||
return None
|
||||
|
||||
def iter_used_keyword_arguments(self):
|
||||
for star_count, key_start, had_equal in list(self._list_arguments()):
|
||||
if had_equal and key_start:
|
||||
yield key_start
|
||||
|
||||
def count_positional_arguments(self):
|
||||
count = 0
|
||||
for star_count, key_start, had_equal in self._list_arguments()[:-1]:
|
||||
if star_count:
|
||||
break
|
||||
count += 1
|
||||
return count
|
||||
|
||||
|
||||
def _iter_arguments(nodes, position):
|
||||
def remove_after_pos(name):
|
||||
@@ -433,3 +481,37 @@ def validate_line_column(func):
|
||||
column, line_len, line, line_string))
|
||||
return func(self, line, column, *args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
def get_module_names(module, all_scopes, definitions=True, references=False):
|
||||
"""
|
||||
Returns a dictionary with name parts as keys and their call paths as
|
||||
values.
|
||||
"""
|
||||
def def_ref_filter(name):
|
||||
is_def = name.is_definition()
|
||||
return definitions and is_def or references and not is_def
|
||||
|
||||
names = list(chain.from_iterable(module.get_used_names().values()))
|
||||
if not all_scopes:
|
||||
# We have to filter all the names that don't have the module as a
|
||||
# parent_scope. There's None as a parent, because nodes in the module
|
||||
# node have the parent module and not suite as all the others.
|
||||
# Therefore it's important to catch that case.
|
||||
|
||||
def is_module_scope_name(name):
|
||||
parent_scope = get_parent_scope(name)
|
||||
# async functions have an extra wrapper. Strip it.
|
||||
if parent_scope and parent_scope.type == 'async_stmt':
|
||||
parent_scope = parent_scope.parent
|
||||
return parent_scope in (module, None)
|
||||
|
||||
names = [n for n in names if is_module_scope_name(n)]
|
||||
return filter(def_ref_filter, names)
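# Rough illustration of the scope filtering above (comment only; the exact
# ordering depends on parso's used-names mapping):
#
#     import parso
#
#     module = parso.parse('def f():\n    x = 1\ny = 2\n')
#     [n.value for n in get_module_names(module, all_scopes=False)]  # ['f', 'y']
#     [n.value for n in get_module_names(module, all_scopes=True)]   # ['f', 'x', 'y']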
|
||||
|
||||
|
||||
def split_search_string(name):
|
||||
type, _, dotted_names = name.rpartition(' ')
|
||||
if type == 'def':
|
||||
type = 'function'
|
||||
return type, dotted_names.split('.')
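# Examples of the search-string grammar handled above (comment only):
#
#     split_search_string('def foo.bar.baz')    # ('function', ['foo', 'bar', 'baz'])
#     split_search_string('class os.path.Foo')  # ('class', ['os', 'path', 'Foo'])
#     split_search_string('foo.bar')            # ('', ['foo', 'bar'])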
|
||||
|
||||
@@ -24,15 +24,18 @@ class MixedModuleContext(ModuleContext):
|
||||
super(MixedModuleContext, self).__init__(tree_module_value)
|
||||
self._namespace_objects = [NamespaceObject(n) for n in namespaces]
|
||||
|
||||
def _get_mixed_object(self, compiled_value):
|
||||
return mixed.MixedObject(
|
||||
compiled_value=compiled_value,
|
||||
tree_value=self._value
|
||||
)
|
||||
|
||||
def get_filters(self, *args, **kwargs):
|
||||
for filter in self._value.as_context().get_filters(*args, **kwargs):
|
||||
yield filter
|
||||
|
||||
for namespace_obj in self._namespace_objects:
|
||||
compiled_object = _create(self.inference_state, namespace_obj)
|
||||
mixed_object = mixed.MixedObject(
|
||||
compiled_object=compiled_object,
|
||||
tree_value=self._value
|
||||
)
|
||||
compiled_value = _create(self.inference_state, namespace_obj)
|
||||
mixed_object = self._get_mixed_object(compiled_value)
|
||||
for filter in mixed_object.get_filters(*args, **kwargs):
|
||||
yield filter
|
||||
|
||||
@@ -1,21 +1,57 @@
|
||||
import os
|
||||
import json
|
||||
"""
|
||||
Projects are a way to handle Python projects within Jedi. For simpler plugins
|
||||
you might not want to deal with projects, but if you want to give the user more
|
||||
flexibility to define sys paths and Python interpreters for a project,
|
||||
:class:`.Project` is the perfect way to allow for that.
|
||||
|
||||
from jedi._compatibility import FileNotFoundError, PermissionError, IsADirectoryError
|
||||
from jedi.api.environment import SameEnvironment, \
|
||||
get_cached_default_environment
|
||||
Projects can be saved to disk and loaded again, to allow project definitions to
|
||||
be used across repositories.
|
||||
"""
|
||||
import os
|
||||
import errno
|
||||
import json
|
||||
import sys
|
||||
|
||||
from jedi._compatibility import FileNotFoundError, PermissionError, \
|
||||
IsADirectoryError
|
||||
from jedi import debug
|
||||
from jedi.api.environment import get_cached_default_environment, create_environment
|
||||
from jedi.api.exceptions import WrongVersion
|
||||
from jedi.api.completion import search_in_module
|
||||
from jedi.api.helpers import split_search_string, get_module_names
|
||||
from jedi._compatibility import force_unicode
|
||||
from jedi.inference.imports import load_module_from_path, \
|
||||
load_namespace_from_path, iter_module_names
|
||||
from jedi.inference.sys_path import discover_buildout_paths
|
||||
from jedi.inference.cache import inference_state_as_method_param_cache
|
||||
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
|
||||
from jedi.file_io import FolderIO
|
||||
from jedi.common.utils import traverse_parents
|
||||
|
||||
_CONFIG_FOLDER = '.jedi'
|
||||
_CONTAINS_POTENTIAL_PROJECT = 'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in'
|
||||
_CONTAINS_POTENTIAL_PROJECT = \
|
||||
'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
|
||||
|
||||
_SERIALIZER_VERSION = 1
|
||||
|
||||
|
||||
def _try_to_skip_duplicates(func):
|
||||
def wrapper(*args, **kwargs):
|
||||
found_tree_nodes = []
|
||||
found_modules = []
|
||||
for definition in func(*args, **kwargs):
|
||||
tree_node = definition._name.tree_name
|
||||
if tree_node is not None and tree_node in found_tree_nodes:
|
||||
continue
|
||||
if definition.type == 'module' and definition.module_path is not None:
|
||||
if definition.module_path in found_modules:
|
||||
continue
|
||||
found_modules.append(definition.module_path)
|
||||
yield definition
|
||||
found_tree_nodes.append(tree_node)
|
||||
return wrapper
|
||||
|
||||
|
||||
def _remove_duplicates_from_path(path):
|
||||
used = set()
|
||||
for p in path:
|
||||
@@ -30,63 +66,94 @@ def _force_unicode_list(lst):
|
||||
|
||||
|
||||
class Project(object):
|
||||
# TODO serialize environment
|
||||
_serializer_ignore_attributes = ('_environment',)
|
||||
"""
|
||||
Projects are a simple way to manage Python folders and define how Jedi does
|
||||
import resolution. It is mostly used as a parameter to :class:`.Script`.
|
||||
Additionally there are functions to search a whole project.
|
||||
"""
|
||||
_environment = None
|
||||
|
||||
@staticmethod
|
||||
def _get_config_folder_path(base_path):
|
||||
return os.path.join(base_path, _CONFIG_FOLDER)
|
||||
|
||||
@staticmethod
|
||||
def _get_json_path(base_path):
|
||||
return os.path.join(base_path, _CONFIG_FOLDER, 'project.json')
|
||||
return os.path.join(Project._get_config_folder_path(base_path), 'project.json')
|
||||
|
||||
@classmethod
|
||||
def load(cls, path):
|
||||
"""
|
||||
Loads a project from a specific path. You should not provide the path
|
||||
to ``.jedi/project.json``, but rather the path to the project folder.
|
||||
|
||||
:param path: The path of the directory you want to use as a project.
|
||||
"""
|
||||
with open(cls._get_json_path(path)) as f:
|
||||
version, data = json.load(f)
|
||||
|
||||
if version == 1:
|
||||
self = cls.__new__()
|
||||
self.__dict__.update(data)
|
||||
return self
|
||||
return cls(**data)
|
||||
else:
|
||||
raise WrongVersion(
|
||||
"The Jedi version of this project seems newer than what we can handle."
|
||||
)
|
||||
|
||||
def save(self):
|
||||
"""
|
||||
Saves the project configuration in the project in ``.jedi/project.json``.
|
||||
"""
|
||||
data = dict(self.__dict__)
|
||||
data.pop('_environment', None)
|
||||
data.pop('_django', None) # TODO make django setting public?
|
||||
data = {k.lstrip('_'): v for k, v in data.items()}
|
||||
|
||||
# TODO when dropping Python 2 use pathlib.Path.mkdir(parents=True, exist_ok=True)
|
||||
try:
|
||||
os.makedirs(self._get_config_folder_path(self._path))
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
with open(self._get_json_path(self._path), 'w') as f:
|
||||
return json.dump((_SERIALIZER_VERSION, data), f)
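# Illustrative save()/load() round trip (hypothetical paths; the project
# folder has to exist so that ``.jedi/project.json`` can be created):
#
#     project = Project('/path/to/repo', added_sys_path=['/path/to/repo/src'])
#     project.save()                          # writes /path/to/repo/.jedi/project.json
#     same_project = Project.load('/path/to/repo')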
|
||||
|
||||
def __init__(self, path, **kwargs):
|
||||
"""
|
||||
:param path: The base path for this project.
|
||||
:param environment_path: The Python executable path, typically the path
|
||||
of a virtual environment.
|
||||
:param load_unsafe_extensions: Default False, Loads extensions that are not in the
|
||||
sys path and in the local directories. With this option enabled,
|
||||
this is potentially unsafe if you clone a git repository and
|
||||
analyze its code, because those compiled extensions will be
|
||||
imported and therefore have execution privileges.
|
||||
:param sys_path: list of str. You can override the sys path if you
|
||||
want. By default the ``sys.path.`` is generated from the
|
||||
want. By default the ``sys.path.`` is generated by the
|
||||
environment (virtualenvs, etc).
|
||||
:param added_sys_path: list of str. Adds these paths at the end of the
|
||||
sys path.
|
||||
:param smart_sys_path: If this is enabled (default), adds paths from
|
||||
local directories. Otherwise you will have to rely on your packages
|
||||
being properly configured on the ``sys.path``.
|
||||
"""
|
||||
def py2_comp(path, environment=None, sys_path=None,
|
||||
smart_sys_path=True, _django=False):
|
||||
def py2_comp(path, environment_path=None, load_unsafe_extensions=False,
|
||||
sys_path=None, added_sys_path=(), smart_sys_path=True):
|
||||
self._path = os.path.abspath(path)
|
||||
if isinstance(environment, SameEnvironment):
|
||||
self._environment = environment
|
||||
|
||||
self._environment_path = environment_path
|
||||
self._sys_path = sys_path
|
||||
self._smart_sys_path = smart_sys_path
|
||||
self._django = _django
|
||||
self._load_unsafe_extensions = load_unsafe_extensions
|
||||
self._django = False
|
||||
self.added_sys_path = list(added_sys_path)
|
||||
"""The sys path that is going to be added at the end of the """
|
||||
|
||||
py2_comp(path, **kwargs)
|
||||
|
||||
@inference_state_as_method_param_cache()
|
||||
def _get_base_sys_path(self, inference_state, environment=None):
|
||||
if self._sys_path is not None:
|
||||
return self._sys_path
|
||||
|
||||
def _get_base_sys_path(self, inference_state):
|
||||
# The sys path has not been set explicitly.
|
||||
if environment is None:
|
||||
environment = self.get_environment()
|
||||
|
||||
sys_path = list(environment.get_sys_path())
|
||||
sys_path = list(inference_state.environment.get_sys_path())
|
||||
try:
|
||||
sys_path.remove('')
|
||||
except ValueError:
|
||||
@@ -94,16 +161,19 @@ class Project(object):
|
||||
return sys_path
|
||||
|
||||
@inference_state_as_method_param_cache()
|
||||
def _get_sys_path(self, inference_state, environment=None,
|
||||
add_parent_paths=True, add_init_paths=False):
|
||||
def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
|
||||
"""
|
||||
Keep this method private for all users of jedi. However internally this
|
||||
one is used like a public method.
|
||||
"""
|
||||
suffixed = []
|
||||
suffixed = list(self.added_sys_path)
|
||||
prefixed = []
|
||||
|
||||
sys_path = list(self._get_base_sys_path(inference_state, environment))
|
||||
if self._sys_path is None:
|
||||
sys_path = list(self._get_base_sys_path(inference_state))
|
||||
else:
|
||||
sys_path = list(self._sys_path)
|
||||
|
||||
if self._smart_sys_path:
|
||||
prefixed.append(self._path)
|
||||
|
||||
@@ -116,7 +186,7 @@ class Project(object):
|
||||
# 2. Stopping immediately when above self._path
|
||||
traversed = []
|
||||
for parent_path in traverse_parents(inference_state.script_path):
|
||||
if not parent_path.startswith(self._path):
|
||||
if parent_path == self._path or not parent_path.startswith(self._path):
|
||||
break
|
||||
if not add_init_paths \
|
||||
and os.path.isfile(os.path.join(parent_path, "__init__.py")):
|
||||
@@ -134,20 +204,144 @@ class Project(object):
|
||||
path = prefixed + sys_path + suffixed
|
||||
return list(_force_unicode_list(_remove_duplicates_from_path(path)))
|
||||
|
||||
def save(self):
|
||||
data = dict(self.__dict__)
|
||||
for attribute in self._serializer_ignore_attributes:
|
||||
data.pop(attribute, None)
|
||||
|
||||
with open(self._get_json_path(self._path), 'wb') as f:
|
||||
return json.dump((_SERIALIZER_VERSION, data), f)
|
||||
|
||||
def get_environment(self):
|
||||
if self._environment is None:
|
||||
return get_cached_default_environment()
|
||||
|
||||
if self._environment_path is not None:
|
||||
self._environment = create_environment(self._environment_path, safe=False)
|
||||
else:
|
||||
self._environment = get_cached_default_environment()
|
||||
return self._environment
|
||||
|
||||
def search(self, string, **kwargs):
|
||||
"""
|
||||
Searches a name in the whole project. If the project is very big,
|
||||
at some point Jedi will stop searching. However, it's also very much
|
||||
recommended to not exhaust the generator. Just display the first ten
|
||||
results to the user.
|
||||
|
||||
There are currently three different search patterns:
|
||||
|
||||
- ``foo`` to search for a definition foo in any file or a file called
|
||||
``foo.py`` or ``foo.pyi``.
|
||||
- ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
|
||||
in it.
|
||||
- ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
|
||||
API type.
|
||||
|
||||
:param bool all_scopes: Default False; searches not only for
|
||||
definitions on the top level of a module level, but also in
|
||||
functions and classes.
|
||||
:yields: :class:`.Name`
|
||||
"""
|
||||
return self._search(string, **kwargs)
|
||||
|
||||
def complete_search(self, string, **kwargs):
|
||||
"""
|
||||
Like :meth:`.Script.search`, but completes that string. An empty string
|
||||
lists all definitions in a project, so be careful with that.
|
||||
|
||||
:param bool all_scopes: Default False; searches not only for
|
||||
definitions on the top level of a module, but also in
|
||||
functions and classes.
|
||||
:yields: :class:`.Completion`
|
||||
"""
|
||||
return self._search_func(string, complete=True, **kwargs)
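# Illustrative usage of the search API above (hypothetical project path;
# assumes the public ``Project`` class from this changeset):
#
#     from itertools import islice
#     from jedi import Project
#
#     project = Project('/path/to/repo')
#     for name in islice(project.search('def foo.bar'), 10):  # don't exhaust it
#         print(name.module_path, name.line, name.description)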
|
||||
|
||||
def _search(self, string, all_scopes=False): # Python 2..
|
||||
return self._search_func(string, all_scopes=all_scopes)
|
||||
|
||||
@_try_to_skip_duplicates
|
||||
def _search_func(self, string, complete=False, all_scopes=False):
|
||||
# Using a Script is the easiest way to get an empty module context.
|
||||
from jedi import Script
|
||||
s = Script('', project=self)
|
||||
inference_state = s._inference_state
|
||||
empty_module_context = s._get_module_context()
|
||||
|
||||
if inference_state.grammar.version_info < (3, 6) or sys.version_info < (3, 6):
|
||||
raise NotImplementedError(
|
||||
"No support for refactorings/search on Python 2/3.5"
|
||||
)
|
||||
debug.dbg('Search for string %s, complete=%s', string, complete)
|
||||
wanted_type, wanted_names = split_search_string(string)
|
||||
name = wanted_names[0]
|
||||
stub_folder_name = name + '-stubs'
|
||||
|
||||
ios = recurse_find_python_folders_and_files(FolderIO(self._path))
|
||||
file_ios = []
|
||||
|
||||
# 1. Search for modules in the current project
|
||||
for folder_io, file_io in ios:
|
||||
if file_io is None:
|
||||
file_name = folder_io.get_base_name()
|
||||
if file_name == name or file_name == stub_folder_name:
|
||||
f = folder_io.get_file_io('__init__.py')
|
||||
try:
|
||||
m = load_module_from_path(inference_state, f).as_context()
|
||||
except FileNotFoundError:
|
||||
f = folder_io.get_file_io('__init__.pyi')
|
||||
try:
|
||||
m = load_module_from_path(inference_state, f).as_context()
|
||||
except FileNotFoundError:
|
||||
m = load_namespace_from_path(inference_state, folder_io).as_context()
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
file_ios.append(file_io)
|
||||
file_name = os.path.basename(file_io.path)
|
||||
if file_name in (name + '.py', name + '.pyi'):
|
||||
m = load_module_from_path(inference_state, file_io).as_context()
|
||||
else:
|
||||
continue
|
||||
|
||||
debug.dbg('Search of a specific module %s', m)
|
||||
for x in search_in_module(
|
||||
inference_state,
|
||||
m,
|
||||
names=[m.name],
|
||||
wanted_type=wanted_type,
|
||||
wanted_names=wanted_names,
|
||||
complete=complete,
|
||||
convert=True,
|
||||
ignore_imports=True,
|
||||
):
|
||||
yield x # Python 2...
|
||||
|
||||
# 2. Search for identifiers in the project.
|
||||
for module_context in search_in_file_ios(inference_state, file_ios, name):
|
||||
names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
|
||||
names = [module_context.create_name(n) for n in names]
|
||||
names = _remove_imports(names)
|
||||
for x in search_in_module(
|
||||
inference_state,
|
||||
module_context,
|
||||
names=names,
|
||||
wanted_type=wanted_type,
|
||||
wanted_names=wanted_names,
|
||||
complete=complete,
|
||||
ignore_imports=True,
|
||||
):
|
||||
yield x # Python 2...
|
||||
|
||||
# 3. Search for modules on sys.path
|
||||
sys_path = [
|
||||
p for p in self._get_sys_path(inference_state)
|
||||
# Exclude folders that are handled by recursing of the Python
|
||||
# folders.
|
||||
if not p.startswith(self._path)
|
||||
]
|
||||
names = list(iter_module_names(inference_state, empty_module_context, sys_path))
|
||||
for x in search_in_module(
|
||||
inference_state,
|
||||
empty_module_context,
|
||||
names=names,
|
||||
wanted_type=wanted_type,
|
||||
wanted_names=wanted_names,
|
||||
complete=complete,
|
||||
convert=True,
|
||||
):
|
||||
yield x # Python 2...
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self._path)
|
||||
|
||||
@@ -167,10 +361,17 @@ def _is_django_path(directory):
|
||||
except (FileNotFoundError, IsADirectoryError, PermissionError):
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def get_default_project(path=None):
|
||||
"""
|
||||
If a project is not defined by the user, Jedi tries to define a project by
|
||||
itself as well as possible. Jedi traverses folders until it finds one of
|
||||
the following:
|
||||
|
||||
1. A ``.jedi/config.json``
|
||||
2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
|
||||
``requirements.txt`` and ``MANIFEST.in``.
|
||||
"""
|
||||
if path is None:
|
||||
path = os.getcwd()
|
||||
|
||||
@@ -192,7 +393,9 @@ def get_default_project(path=None):
|
||||
first_no_init_file = dir
|
||||
|
||||
if _is_django_path(dir):
|
||||
return Project(dir, _django=True)
|
||||
project = Project(dir)
|
||||
project._django = True
|
||||
return project
|
||||
|
||||
if probable_path is None and _is_potential_project(dir):
|
||||
probable_path = dir
|
||||
@@ -206,3 +409,10 @@ def get_default_project(path=None):
|
||||
|
||||
curdir = path if os.path.isdir(path) else os.path.dirname(path)
|
||||
return Project(curdir)
|
||||
|
||||
|
||||
def _remove_imports(names):
|
||||
return [
|
||||
n for n in names
|
||||
if n.tree_name is None or n.api_type != 'module'
|
||||
]
|
||||
|
||||
225
jedi/api/refactoring/__init__.py
Normal file
@@ -0,0 +1,225 @@
|
||||
from os.path import dirname, basename, join, relpath
|
||||
import os
|
||||
import re
|
||||
import difflib
|
||||
|
||||
from parso import split_lines
|
||||
|
||||
from jedi.api.exceptions import RefactoringError
|
||||
|
||||
EXPRESSION_PARTS = (
|
||||
'or_test and_test not_test comparison '
|
||||
'expr xor_expr and_expr shift_expr arith_expr term factor power atom_expr'
|
||||
).split()
|
||||
|
||||
|
||||
class ChangedFile(object):
|
||||
def __init__(self, inference_state, from_path, to_path,
|
||||
module_node, node_to_str_map):
|
||||
self._inference_state = inference_state
|
||||
self._from_path = from_path
|
||||
self._to_path = to_path
|
||||
self._module_node = module_node
|
||||
self._node_to_str_map = node_to_str_map
|
||||
|
||||
def get_diff(self):
|
||||
old_lines = split_lines(self._module_node.get_code(), keepends=True)
|
||||
new_lines = split_lines(self.get_new_code(), keepends=True)
|
||||
project_path = self._inference_state.project._path
|
||||
diff = difflib.unified_diff(
|
||||
old_lines, new_lines,
|
||||
fromfile=relpath(self._from_path, project_path),
|
||||
tofile=relpath(self._to_path, project_path),
|
||||
)
|
||||
# Apparently there's a space at the end of the diff - for whatever
|
||||
# reason.
|
||||
return ''.join(diff).rstrip(' ')
|
||||
|
||||
def get_new_code(self):
|
||||
return self._inference_state.grammar.refactor(self._module_node, self._node_to_str_map)
|
||||
|
||||
def apply(self):
|
||||
if self._from_path is None:
|
||||
raise RefactoringError(
|
||||
'Cannot apply a refactoring on a Script with path=None'
|
||||
)
|
||||
|
||||
with open(self._from_path, 'w', newline='') as f:
|
||||
f.write(self.get_new_code())
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self._from_path)
|
||||
|
||||
|
||||
class Refactoring(object):
|
||||
def __init__(self, inference_state, file_to_node_changes, renames=()):
|
||||
self._inference_state = inference_state
|
||||
self._renames = renames
|
||||
self._file_to_node_changes = file_to_node_changes
|
||||
|
||||
def get_changed_files(self):
|
||||
"""
|
||||
Returns a map from file path to ``ChangedFile``.
|
||||
"""
|
||||
def calculate_to_path(p):
|
||||
if p is None:
|
||||
return p
|
||||
for from_, to in renames:
|
||||
if p.startswith(from_):
|
||||
p = to + p[len(from_):]
|
||||
return p
|
||||
|
||||
renames = self.get_renames()
|
||||
return {
|
||||
path: ChangedFile(
|
||||
self._inference_state,
|
||||
from_path=path,
|
||||
to_path=calculate_to_path(path),
|
||||
module_node=next(iter(map_)).get_root_node(),
|
||||
node_to_str_map=map_
|
||||
) for path, map_ in sorted(self._file_to_node_changes.items())
|
||||
}
|
||||
|
||||
def get_renames(self):
|
||||
"""
|
||||
Files can be renamed in a refactoring.
|
||||
|
||||
Returns ``Iterable[Tuple[str, str]]``.
|
||||
"""
|
||||
return sorted(self._renames)
|
||||
|
||||
def get_diff(self):
|
||||
text = ''
|
||||
project_path = self._inference_state.project._path
|
||||
for from_, to in self.get_renames():
|
||||
text += 'rename from %s\nrename to %s\n' \
|
||||
% (relpath(from_, project_path), relpath(to, project_path))
|
||||
|
||||
return text + ''.join(f.get_diff() for f in self.get_changed_files().values())
|
||||
|
||||
def apply(self):
|
||||
"""
|
||||
Applies the whole refactoring to the files, which includes renames.
|
||||
"""
|
||||
for f in self.get_changed_files().values():
|
||||
f.apply()
|
||||
|
||||
for old, new in self.get_renames():
|
||||
os.rename(old, new)
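# Sketch of the intended workflow for these objects (hypothetical file and
# cursor position; assumes the ``Script.rename()`` entry point added in this
# changeset):
#
#     import jedi
#
#     refactoring = jedi.Script(path='module.py').rename(line=3, column=4,
#                                                        new_name='better_name')
#     print(refactoring.get_diff())  # unified diff, including file renames
#     refactoring.apply()            # writes the changes and renames files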
|
||||
|
||||
|
||||
def _calculate_rename(path, new_name):
|
||||
name = basename(path)
|
||||
dir_ = dirname(path)
|
||||
if name in ('__init__.py', '__init__.pyi'):
|
||||
parent_dir = dirname(dir_)
|
||||
return dir_, join(parent_dir, new_name)
|
||||
ending = re.search(r'\.pyi?$', name).group(0)
|
||||
return path, join(dir_, new_name + ending)
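# What the path calculation above produces (comment only, POSIX-style paths):
#
#     _calculate_rename('/proj/pkg/mod.py', 'new')       # ('/proj/pkg/mod.py', '/proj/pkg/new.py')
#     _calculate_rename('/proj/pkg/mod.pyi', 'new')      # ('/proj/pkg/mod.pyi', '/proj/pkg/new.pyi')
#     _calculate_rename('/proj/pkg/__init__.py', 'new')  # ('/proj/pkg', '/proj/new')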
|
||||
|
||||
|
||||
def rename(inference_state, definitions, new_name):
|
||||
file_renames = set()
|
||||
file_tree_name_map = {}
|
||||
|
||||
if not definitions:
|
||||
raise RefactoringError("There is no name under the cursor")
|
||||
|
||||
for d in definitions:
|
||||
tree_name = d._name.tree_name
|
||||
if d.type == 'module' and tree_name is None:
|
||||
file_renames.add(_calculate_rename(d.module_path, new_name))
|
||||
else:
|
||||
# This private access is ok in a way. It's not public to
|
||||
# protect Jedi users from seeing it.
|
||||
if tree_name is not None:
|
||||
fmap = file_tree_name_map.setdefault(d.module_path, {})
|
||||
fmap[tree_name] = tree_name.prefix + new_name
|
||||
return Refactoring(inference_state, file_tree_name_map, file_renames)
|
||||
|
||||
|
||||
def inline(inference_state, names):
|
||||
if not names:
|
||||
raise RefactoringError("There is no name under the cursor")
|
||||
if any(n.api_type == 'module' for n in names):
|
||||
raise RefactoringError("Cannot inline imports or modules")
|
||||
if any(n.tree_name is None for n in names):
|
||||
raise RefactoringError("Cannot inline builtins/extensions")
|
||||
|
||||
definitions = [n for n in names if n.tree_name.is_definition()]
|
||||
if len(definitions) == 0:
|
||||
raise RefactoringError("No definition found to inline")
|
||||
if len(definitions) > 1:
|
||||
raise RefactoringError("Cannot inline a name with multiple definitions")
|
||||
|
||||
tree_name = definitions[0].tree_name
|
||||
|
||||
expr_stmt = tree_name.get_definition()
|
||||
if expr_stmt.type != 'expr_stmt':
|
||||
type_ = dict(
|
||||
funcdef='function',
|
||||
classdef='class',
|
||||
).get(expr_stmt.type, expr_stmt.type)
|
||||
raise RefactoringError("Cannot inline a %s" % type_)
|
||||
|
||||
if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
|
||||
raise RefactoringError("Cannot inline a statement with multiple definitions")
|
||||
first_child = expr_stmt.children[1]
|
||||
if first_child.type == 'annassign' and len(first_child.children) == 4:
|
||||
first_child = first_child.children[2]
|
||||
if first_child != '=':
|
||||
if first_child.type == 'annassign':
|
||||
raise RefactoringError(
|
||||
'Cannot inline a statement that is defined by an annotation'
|
||||
)
|
||||
else:
|
||||
raise RefactoringError(
|
||||
'Cannot inline a statement with "%s"'
|
||||
% first_child.get_code(include_prefix=False)
|
||||
)
|
||||
|
||||
rhs = expr_stmt.get_rhs()
|
||||
replace_code = rhs.get_code(include_prefix=False)
|
||||
|
||||
references = [n for n in names if not n.tree_name.is_definition()]
|
||||
file_to_node_changes = {}
|
||||
for name in references:
|
||||
tree_name = name.tree_name
|
||||
path = name.get_root_context().py__file__()
|
||||
s = replace_code
|
||||
if rhs.type == 'testlist_star_expr' \
|
||||
or tree_name.parent.type in EXPRESSION_PARTS \
|
||||
or tree_name.parent.type == 'trailer' \
|
||||
and tree_name.parent.get_next_sibling() is not None:
|
||||
s = '(' + replace_code + ')'
|
||||
|
||||
of_path = file_to_node_changes.setdefault(path, {})
|
||||
|
||||
n = tree_name
|
||||
prefix = n.prefix
|
||||
par = n.parent
|
||||
if par.type == 'trailer' and par.children[0] == '.':
|
||||
prefix = par.parent.children[0].prefix
|
||||
n = par
|
||||
for some_node in par.parent.children[:par.parent.children.index(par)]:
|
||||
of_path[some_node] = ''
|
||||
of_path[n] = prefix + s
|
||||
|
||||
path = definitions[0].get_root_context().py__file__()
|
||||
changes = file_to_node_changes.setdefault(path, {})
|
||||
changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
|
||||
next_leaf = expr_stmt.get_next_leaf()
|
||||
|
||||
# Most of the time we have to remove the newline at the end of the
|
||||
# statement, but if there's a comment we might not need to.
|
||||
if next_leaf.prefix.strip(' \t') == '' \
|
||||
and (next_leaf.type == 'newline' or next_leaf == ';'):
|
||||
changes[next_leaf] = ''
|
||||
return Refactoring(inference_state, file_to_node_changes)
|
||||
|
||||
|
||||
def _remove_indent_of_prefix(prefix):
|
||||
r"""
|
||||
Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
|
||||
"""
|
||||
return ''.join(split_lines(prefix, keepends=True)[:-1])
|
||||
386
jedi/api/refactoring/extract.py
Normal file
@@ -0,0 +1,386 @@
|
||||
from textwrap import dedent
|
||||
|
||||
from parso import split_lines
|
||||
|
||||
from jedi import debug
|
||||
from jedi.api.exceptions import RefactoringError
|
||||
from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
|
||||
from jedi.common.utils import indent_block
|
||||
from jedi.parser_utils import function_is_classmethod, function_is_staticmethod
|
||||
|
||||
|
||||
_EXTRACT_USE_PARENT = EXPRESSION_PARTS + ['trailer']
|
||||
_DEFINITION_SCOPES = ('suite', 'file_input')
|
||||
_VARIABLE_EXCTRACTABLE = EXPRESSION_PARTS + \
|
||||
('atom testlist_star_expr testlist test lambdef lambdef_nocond '
|
||||
'keyword name number string fstring').split()
|
||||
|
||||
|
||||
def extract_variable(inference_state, path, module_node, name, pos, until_pos):
|
||||
nodes = _find_nodes(module_node, pos, until_pos)
|
||||
debug.dbg('Extracting nodes: %s', nodes)
|
||||
|
||||
is_expression, message = _is_expression_with_error(nodes)
|
||||
if not is_expression:
|
||||
raise RefactoringError(message)
|
||||
|
||||
generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
|
||||
file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
|
||||
return Refactoring(inference_state, file_to_node_changes)
|
||||
|
||||
|
||||
def _is_expression_with_error(nodes):
|
||||
"""
|
||||
Returns a tuple (is_expression, error_string).
|
||||
"""
|
||||
if any(node.type == 'name' and node.is_definition() for node in nodes):
|
||||
return False, 'Cannot extract a name that defines something'
|
||||
|
||||
if nodes[0].type not in _VARIABLE_EXCTRACTABLE:
|
||||
return False, 'Cannot extract a "%s"' % nodes[0].type
|
||||
return True, ''
|
||||
|
||||
|
||||
def _find_nodes(module_node, pos, until_pos):
|
||||
"""
|
||||
Looks up a module and tries to find the appropriate number of nodes that
|
||||
are in there.
|
||||
"""
|
||||
start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)
|
||||
|
||||
if until_pos is None:
|
||||
if start_node.type == 'operator':
|
||||
next_leaf = start_node.get_next_leaf()
|
||||
if next_leaf is not None and next_leaf.start_pos == pos:
|
||||
start_node = next_leaf
|
||||
|
||||
if _is_not_extractable_syntax(start_node):
|
||||
start_node = start_node.parent
|
||||
|
||||
while start_node.parent.type in _EXTRACT_USE_PARENT:
|
||||
start_node = start_node.parent
|
||||
|
||||
nodes = [start_node]
|
||||
else:
|
||||
# Get the next leaf if we are at the end of a leaf
|
||||
if start_node.end_pos == pos:
|
||||
next_leaf = start_node.get_next_leaf()
|
||||
if next_leaf is not None:
|
||||
start_node = next_leaf
|
||||
|
||||
# Some syntax is not extractable, just use its parent
|
||||
if _is_not_extractable_syntax(start_node):
|
||||
start_node = start_node.parent
|
||||
|
||||
# Find the end
|
||||
end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
|
||||
if end_leaf.start_pos > until_pos:
|
||||
end_leaf = end_leaf.get_previous_leaf()
|
||||
if end_leaf is None:
|
||||
raise RefactoringError('Cannot extract anything from that')
|
||||
|
||||
parent_node = start_node
|
||||
while parent_node.end_pos < end_leaf.end_pos:
|
||||
parent_node = parent_node.parent
|
||||
|
||||
nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)
|
||||
|
||||
# If the user marks just a return statement, we return the expression
|
||||
# instead of the whole statement, because the user obviously wants to
|
||||
# extract that part.
|
||||
if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
|
||||
return [nodes[0].children[1]]
|
||||
return nodes
|
||||
|
||||
|
||||
def _replace(nodes, expression_replacement, extracted, pos,
|
||||
insert_before_leaf=None, remaining_prefix=None):
|
||||
# Now try to replace the nodes found with a variable and move the code
|
||||
# before the current statement.
|
||||
definition = _get_parent_definition(nodes[0])
|
||||
if insert_before_leaf is None:
|
||||
insert_before_leaf = definition.get_first_leaf()
|
||||
first_node_leaf = nodes[0].get_first_leaf()
|
||||
|
||||
lines = split_lines(insert_before_leaf.prefix, keepends=True)
|
||||
if first_node_leaf is insert_before_leaf:
|
||||
if remaining_prefix is not None:
|
||||
# The remaining prefix has already been calculated.
|
||||
lines[:-1] = remaining_prefix
|
||||
lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n']
|
||||
extracted_prefix = ''.join(lines)
|
||||
|
||||
replacement_dct = {}
|
||||
if first_node_leaf is insert_before_leaf:
|
||||
replacement_dct[nodes[0]] = extracted_prefix + expression_replacement
|
||||
else:
|
||||
if remaining_prefix is None:
|
||||
p = first_node_leaf.prefix
|
||||
else:
|
||||
p = remaining_prefix + _get_indentation(nodes[0])
|
||||
replacement_dct[nodes[0]] = p + expression_replacement
|
||||
replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value
|
||||
|
||||
for node in nodes[1:]:
|
||||
replacement_dct[node] = ''
|
||||
return replacement_dct
|
||||
|
||||
|
||||
def _expression_nodes_to_string(nodes):
|
||||
return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes))
|
||||
|
||||
|
||||
def _suite_nodes_to_string(nodes, pos):
|
||||
n = nodes[0]
|
||||
prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1)
|
||||
code = part_of_code + n.get_code(include_prefix=False) \
|
||||
+ ''.join(n.get_code() for n in nodes[1:])
|
||||
return prefix, code
|
||||
|
||||
|
||||
def _split_prefix_at(leaf, until_line):
|
||||
"""
|
||||
Returns a tuple of the leaf's prefix, split at the until_line
|
||||
position.
|
||||
"""
|
||||
# second means the second returned part
|
||||
second_line_count = leaf.start_pos[0] - until_line
|
||||
lines = split_lines(leaf.prefix, keepends=True)
|
||||
return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:])
|
||||
|
||||
|
||||
def _get_indentation(node):
|
||||
return split_lines(node.get_first_leaf().prefix)[-1]
|
||||
|
||||
|
||||
def _get_parent_definition(node):
|
||||
"""
|
||||
Returns the statement where a node is defined.
|
||||
"""
|
||||
while node is not None:
|
||||
if node.parent.type in _DEFINITION_SCOPES:
|
||||
return node
|
||||
node = node.parent
|
||||
raise NotImplementedError('We should never even get here')
|
||||
|
||||
|
||||
def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
|
||||
"""
|
||||
This function makes it so for `1 * 2 + 3` you can extract `2 + 3`, even
|
||||
though it is not part of the expression.
|
||||
"""
|
||||
typ = parent_node.type
|
||||
is_suite_part = typ in ('suite', 'file_input')
|
||||
if typ in EXPRESSION_PARTS or is_suite_part:
|
||||
nodes = parent_node.children
|
||||
for i, n in enumerate(nodes):
|
||||
if n.end_pos > pos:
|
||||
start_index = i
|
||||
if n.type == 'operator':
|
||||
start_index -= 1
|
||||
break
|
||||
for i, n in reversed(list(enumerate(nodes))):
|
||||
if n.start_pos < until_pos:
|
||||
end_index = i
|
||||
if n.type == 'operator':
|
||||
end_index += 1
|
||||
|
||||
# Something like `not foo or bar` should not be cut after not
|
||||
for n2 in nodes[i:]:
|
||||
if _is_not_extractable_syntax(n2):
|
||||
end_index += 1
|
||||
else:
|
||||
break
|
||||
break
|
||||
nodes = nodes[start_index:end_index + 1]
|
||||
if not is_suite_part:
|
||||
nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos)
|
||||
nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos)
|
||||
return nodes
|
||||
return [parent_node]
|
||||
|
||||
|
||||
def _is_not_extractable_syntax(node):
|
||||
return node.type == 'operator' \
|
||||
or node.type == 'keyword' and node.value not in ('None', 'True', 'False')
|
||||
|
||||
|
||||
def extract_function(inference_state, path, module_context, name, pos, until_pos):
|
||||
nodes = _find_nodes(module_context.tree_node, pos, until_pos)
|
||||
assert len(nodes)
|
||||
|
||||
is_expression, _ = _is_expression_with_error(nodes)
|
||||
context = module_context.create_context(nodes[0])
|
||||
is_bound_method = context.is_bound_method()
|
||||
params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes))
|
||||
|
||||
# Find variables
|
||||
# Is a class method / method
|
||||
if context.is_module():
|
||||
insert_before_leaf = None # Leaf will be determined later
|
||||
else:
|
||||
node = _get_code_insertion_node(context.tree_node, is_bound_method)
|
||||
insert_before_leaf = node.get_first_leaf()
|
||||
if is_expression:
|
||||
code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n'
|
||||
remaining_prefix = None
|
||||
has_ending_return_stmt = False
|
||||
else:
|
||||
has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1])
|
||||
if not has_ending_return_stmt:
|
||||
# Find the actually used variables (of the defined ones). If none are
|
||||
# used (e.g. if the range covers the whole function), return the last
|
||||
# defined variable.
|
||||
return_variables = list(_find_needed_output_variables(
|
||||
context,
|
||||
nodes[0].parent,
|
||||
nodes[-1].end_pos,
|
||||
return_variables
|
||||
)) or [return_variables[-1]] if return_variables else []
|
||||
|
||||
remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos)
|
||||
after_leaf = nodes[-1].get_next_leaf()
|
||||
first, second = _split_prefix_at(after_leaf, until_pos[0])
|
||||
code_block += first
|
||||
|
||||
code_block = dedent(code_block)
|
||||
if not has_ending_return_stmt:
|
||||
output_var_str = ', '.join(return_variables)
|
||||
code_block += 'return ' + output_var_str + '\n'
|
||||
|
||||
# Check if we have to raise RefactoringError
|
||||
_check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes)
|
||||
|
||||
decorator = ''
|
||||
self_param = None
|
||||
if is_bound_method:
|
||||
if not function_is_staticmethod(context.tree_node):
|
||||
function_param_names = context.get_value().get_param_names()
|
||||
if len(function_param_names):
|
||||
self_param = function_param_names[0].string_name
|
||||
params = [p for p in params if p != self_param]
|
||||
|
||||
if function_is_classmethod(context.tree_node):
|
||||
decorator = '@classmethod\n'
|
||||
else:
|
||||
code_block += '\n'
|
||||
|
||||
function_code = '%sdef %s(%s):\n%s' % (
|
||||
decorator,
|
||||
name,
|
||||
', '.join(params if self_param is None else [self_param] + params),
|
||||
indent_block(code_block)
|
||||
)
|
||||
|
||||
function_call = '%s(%s)' % (
|
||||
('' if self_param is None else self_param + '.') + name,
|
||||
', '.join(params)
|
||||
)
|
||||
if is_expression:
|
||||
replacement = function_call
|
||||
else:
|
||||
if has_ending_return_stmt:
|
||||
replacement = 'return ' + function_call + '\n'
|
||||
else:
|
||||
replacement = output_var_str + ' = ' + function_call + '\n'
|
||||
|
||||
replacement_dct = _replace(nodes, replacement, function_code, pos,
|
||||
insert_before_leaf, remaining_prefix)
|
||||
if not is_expression:
|
||||
replacement_dct[after_leaf] = second + after_leaf.value
|
||||
file_to_node_changes = {path: replacement_dct}
|
||||
return Refactoring(inference_state, file_to_node_changes)
|
||||
|
||||
|
||||
def _check_for_non_extractables(nodes):
|
||||
for n in nodes:
|
||||
try:
|
||||
children = n.children
|
||||
except AttributeError:
|
||||
if n.value == 'return':
|
||||
raise RefactoringError(
|
||||
'Can only extract return statements if they are at the end.')
|
||||
if n.value == 'yield':
|
||||
raise RefactoringError('Cannot extract yield statements.')
|
||||
else:
|
||||
_check_for_non_extractables(children)
|
||||
|
||||
|
||||
def _is_name_input(module_context, names, first, last):
|
||||
for name in names:
|
||||
if name.api_type == 'param' or not name.parent_context.is_module():
|
||||
if name.get_root_context() is not module_context:
|
||||
return True
|
||||
if name.start_pos is None or not (first <= name.start_pos < last):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _find_inputs_and_outputs(module_context, context, nodes):
|
||||
first = nodes[0].start_pos
|
||||
last = nodes[-1].end_pos
|
||||
|
||||
inputs = []
|
||||
outputs = []
|
||||
for name in _find_non_global_names(nodes):
|
||||
if name.is_definition():
|
||||
if name not in outputs:
|
||||
outputs.append(name.value)
|
||||
else:
|
||||
if name.value not in inputs:
|
||||
name_definitions = context.goto(name, name.start_pos)
|
||||
if not name_definitions \
|
||||
or _is_name_input(module_context, name_definitions, first, last):
|
||||
inputs.append(name.value)
|
||||
|
||||
# Check if outputs are really needed:
|
||||
return inputs, outputs
|
||||
|
||||
|
||||
def _find_non_global_names(nodes):
|
||||
for node in nodes:
|
||||
try:
|
||||
children = node.children
|
||||
except AttributeError:
|
||||
if node.type == 'name':
|
||||
yield node
|
||||
else:
|
||||
# We only want to check foo in foo.bar
|
||||
if node.type == 'trailer' and node.children[0] == '.':
|
||||
continue
|
||||
|
||||
for x in _find_non_global_names(children): # Python 2...
|
||||
yield x
|
||||
|
||||
|
||||
def _get_code_insertion_node(node, is_bound_method):
|
||||
if not is_bound_method or function_is_staticmethod(node):
|
||||
while node.parent.type != 'file_input':
|
||||
node = node.parent
|
||||
|
||||
while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'):
|
||||
node = node.parent
|
||||
return node
|
||||
|
||||
|
||||
def _find_needed_output_variables(context, search_node, at_least_pos, return_variables):
|
||||
"""
|
||||
Searches everything after at_least_pos in a node and checks if any of the
|
||||
return_variables are used in there and returns those.
|
||||
"""
|
||||
for node in search_node.children:
|
||||
if node.start_pos < at_least_pos:
|
||||
continue
|
||||
|
||||
return_variables = set(return_variables)
|
||||
for name in _find_non_global_names([node]):
|
||||
if not name.is_definition() and name.value in return_variables:
|
||||
return_variables.remove(name.value)
|
||||
yield name.value
|
||||
|
||||
|
||||
def _is_node_ending_return_stmt(node):
|
||||
t = node.type
|
||||
if t == 'simple_stmt':
|
||||
return _is_node_ending_return_stmt(node.children[0])
|
||||
return t == 'return_stmt'
|
||||
@@ -93,17 +93,16 @@ def _get_string_prefix_and_quote(string):
|
||||
return match.group(1), match.group(2)
|
||||
|
||||
|
||||
def _get_string_quote(string):
|
||||
return _get_string_prefix_and_quote(string)[1]
|
||||
|
||||
|
||||
def _matches_quote_at_position(code_lines, quote, position):
|
||||
string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
|
||||
return string == quote
|
||||
|
||||
|
||||
def get_quote_ending(string, code_lines, position, invert_result=False):
|
||||
quote = _get_string_quote(string)
|
||||
_, quote = _get_string_prefix_and_quote(string)
|
||||
if quote is None:
|
||||
return ''
|
||||
|
||||
# Add a quote only if it's not already there.
|
||||
if _matches_quote_at_position(code_lines, quote, position) != invert_result:
|
||||
return ''
|
||||
|
||||
@@ -20,38 +20,6 @@ from parso.cache import parser_cache
|
||||
_time_caches = {}
|
||||
|
||||
|
||||
def underscore_memoization(func):
|
||||
"""
|
||||
Decorator for methods::
|
||||
|
||||
class A(object):
|
||||
def x(self):
|
||||
if self._x:
|
||||
self._x = 10
|
||||
return self._x
|
||||
|
||||
Becomes::
|
||||
|
||||
class A(object):
|
||||
@underscore_memoization
|
||||
def x(self):
|
||||
return 10
|
||||
|
||||
A now has an attribute ``_x`` written by this decorator.
|
||||
"""
|
||||
name = '_' + func.__name__
|
||||
|
||||
def wrapper(self):
|
||||
try:
|
||||
return getattr(self, name)
|
||||
except AttributeError:
|
||||
result = func(self)
|
||||
setattr(self, name, result)
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def clear_time_caches(delete_all=False):
|
||||
""" Jedi caches many things, that should be completed after each completion
|
||||
finishes.
|
||||
|
||||
@@ -24,3 +24,13 @@ def monkeypatch(obj, attribute_name, new_value):
|
||||
yield
|
||||
finally:
|
||||
setattr(obj, attribute_name, old_value)
|
||||
|
||||
|
||||
def indent_block(text, indention=' '):
|
||||
"""This function indents a text block with a default of four spaces."""
|
||||
temp = ''
|
||||
while text and text[-1] == '\n':
|
||||
temp += text[-1]
|
||||
text = text[:-1]
|
||||
lines = text.split('\n')
|
||||
return '\n'.join(map(lambda s: indention + s, lines)) + temp
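A quick sanity check of the helper's behaviour (trailing newlines are kept, but re-appended unindented at the end)::

    assert indent_block('a\nb\n') == '    a\n    b\n'
    assert indent_block('x', indention='  ') == '  x'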
|
||||
|
||||
@@ -10,6 +10,43 @@ class BaseValue(object):
|
||||
return value
|
||||
value = value.parent_context
|
||||
|
||||
def infer_type_vars(self, value_set, is_class_value=False):
|
||||
"""
|
||||
When the current instance represents a type annotation, this method
|
||||
tries to find information about undefined type vars and returns a dict
|
||||
from type var name to value set.
|
||||
|
||||
This is for example important to understand what `iter([1])` returns.
|
||||
According to typeshed, `iter` returns an `Iterator[_T]`:
|
||||
|
||||
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
|
||||
|
||||
This function would generate `int` for `_T` in this case, because it
|
||||
unpacks the `Iterable`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
`self`: represents the annotation of the current parameter to infer the
|
||||
value for. In the above example, this would initially be the
|
||||
`Iterable[_T]` of the `iterable` parameter and then, when recursing,
|
||||
just the `_T` generic parameter.
|
||||
|
||||
`value_set`: represents the actual argument passed to the parameter
|
||||
we're inferring for, or (for recursive calls) their types. In the
|
||||
above example this would first be the representation of the list
|
||||
`[1]` and then, when recursing, just of `1`.
|
||||
|
||||
`is_class_value`: tells us whether or not to treat the `value_set` as
|
||||
representing the instances or types being passed, which is necessary
|
||||
to correctly cope with `Type[T]` annotations. When it is True, this
|
||||
means that we are being called with a nested portion of an
|
||||
annotation and that the `value_set` represents the types of the
|
||||
arguments, rather than their actual instances. Note: not all
|
||||
recursive calls will necessarily set this to True.
|
||||
"""
|
||||
return {}
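The observable effect of this machinery is easiest to see from the outside; a rough sketch using the public API (``Script.infer`` is assumed to be available in the installed version)::

    import jedi

    # ``iter`` is annotated as ``(Iterable[_T]) -> Iterator[_T]`` in typeshed,
    # so the loop variable below should resolve ``_T`` to ``int``.
    code = "for x in iter([1]):\n    x"
    print([d.name for d in jedi.Script(code).infer(line=2, column=4)])  # expected: ['int']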
|
||||
|
||||
|
||||
class BaseValueSet(object):
|
||||
def __init__(self, iterable):
|
||||
|
||||
@@ -7,6 +7,9 @@ class AbstractFolderIO(object):
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
|
||||
def get_base_name(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def list(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@@ -63,7 +63,6 @@ only *inferes* what needs to be *inferred*. All the statements and modules
|
||||
that are not used are just being ignored.
|
||||
"""
|
||||
import parso
|
||||
from parso import python_bytes_to_unicode
|
||||
from jedi.file_io import FileIO
|
||||
|
||||
from jedi import debug
|
||||
@@ -142,7 +141,7 @@ class InferenceState(object):
|
||||
|
||||
def get_sys_path(self, **kwargs):
|
||||
"""Convenience function"""
|
||||
return self.project._get_sys_path(self, environment=self.environment, **kwargs)
|
||||
return self.project._get_sys_path(self, **kwargs)
|
||||
|
||||
def infer(self, context, name):
|
||||
def_ = name.get_definition(import_name_always=True)
|
||||
@@ -186,7 +185,7 @@ class InferenceState(object):
|
||||
file_io = FileIO(path)
|
||||
code = file_io.read()
|
||||
# We cannot just use parso, because it doesn't use errors='replace'.
|
||||
code = python_bytes_to_unicode(code, encoding=encoding, errors='replace')
|
||||
code = parso.python_bytes_to_unicode(code, encoding=encoding, errors='replace')
|
||||
|
||||
if len(code) > settings._cropped_file_size:
|
||||
code = code[:settings._cropped_file_size]
|
||||
|
||||
@@ -18,7 +18,7 @@ def try_iter_content(types, depth=0):
|
||||
"""Helper method for static analysis."""
|
||||
if depth > 10:
|
||||
# It's possible that a loop has references on itself (especially with
|
||||
# CompiledObject). Therefore don't loop infinitely.
|
||||
# CompiledValue). Therefore don't loop infinitely.
|
||||
return
|
||||
|
||||
for typ in types:
|
||||
@@ -131,15 +131,6 @@ def _parse_argument_clinic(string):
|
||||
|
||||
|
||||
class _AbstractArgumentsMixin(object):
|
||||
def infer_all(self, funcdef=None):
|
||||
"""
|
||||
Infers all arguments as a support for static analysis
|
||||
(normally Jedi).
|
||||
"""
|
||||
for key, lazy_value in self.unpack():
|
||||
types = lazy_value.infer()
|
||||
try_iter_content(types)
|
||||
|
||||
def unpack(self, funcdef=None):
|
||||
raise NotImplementedError
|
||||
|
||||
@@ -170,7 +161,9 @@ def unpack_arglist(arglist):
|
||||
if child == ',':
|
||||
continue
|
||||
elif child in ('*', '**'):
|
||||
yield len(child.value), next(iterator)
|
||||
c = next(iterator, None)
|
||||
assert c is not None
|
||||
yield len(child.value), c
|
||||
elif child.type == 'argument' and \
|
||||
child.children[0] in ('*', '**'):
|
||||
assert len(child.children) == 2
|
||||
|
||||
@@ -70,8 +70,6 @@ class HelperValueMixin(object):
|
||||
yield f
|
||||
|
||||
def goto(self, name_or_str, name_context=None, analysis_errors=True):
|
||||
if name_context is None:
|
||||
name_context = self
|
||||
from jedi.inference import finder
|
||||
filters = self._get_value_filters(name_or_str)
|
||||
names = finder.filter_name(filters, name_or_str)
|
||||
@@ -218,7 +216,6 @@ class Value(HelperValueMixin, BaseValue):
|
||||
return ''
|
||||
else:
|
||||
return clean_scope_docstring(self.tree_node)
|
||||
return None
|
||||
|
||||
def get_safe_value(self, default=sentinel):
|
||||
if default is sentinel:
|
||||
@@ -258,12 +255,16 @@ class Value(HelperValueMixin, BaseValue):
|
||||
def _as_context(self):
|
||||
raise NotImplementedError('Not all values need to be converted to contexts: %s', self)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def py__name__(self):
|
||||
return self.name.string_name
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
return None
|
||||
|
||||
|
||||
def iterate_values(values, contextualized_node=None, is_async=False):
|
||||
"""
|
||||
@@ -414,6 +415,38 @@ class ValueSet(BaseValueSet):
|
||||
def get_signatures(self):
|
||||
return [sig for c in self._set for sig in c.get_signatures()]
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set]
|
||||
type_hints = sorted(filter(None, t))
|
||||
if len(type_hints) == 1:
|
||||
return type_hints[0]
|
||||
|
||||
optional = 'None' in type_hints
|
||||
if optional:
|
||||
type_hints.remove('None')
|
||||
|
||||
if len(type_hints) == 0:
|
||||
return None
|
||||
elif len(type_hints) == 1:
|
||||
s = type_hints[0]
|
||||
else:
|
||||
s = 'Union[%s]' % ', '.join(type_hints)
|
||||
if optional:
|
||||
s = 'Optional[%s]' % s
|
||||
return s
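The Optional/Union folding above can be shown with a standalone toy version (illustration only, not the method itself)::

    def fold_type_hints(type_hints):
        # Mirrors the branching above: a single hint passes through, ``None``
        # turns the rest into ``Optional[...]``, several hints become a Union.
        type_hints = sorted(filter(None, type_hints))
        if len(type_hints) == 1:
            return type_hints[0]
        optional = 'None' in type_hints
        if optional:
            type_hints.remove('None')
        if not type_hints:
            return None
        s = type_hints[0] if len(type_hints) == 1 else 'Union[%s]' % ', '.join(type_hints)
        return 'Optional[%s]' % s if optional else s

    assert fold_type_hints(['int', 'None', 'str']) == 'Optional[Union[int, str]]'
    assert fold_type_hints(['None']) == 'None'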
|
||||
|
||||
def infer_type_vars(self, value_set, is_class_value=False):
|
||||
# Circular
|
||||
from jedi.inference.gradual.annotation import merge_type_var_dicts
|
||||
|
||||
type_var_dict = {}
|
||||
for value in self._set:
|
||||
merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
value.infer_type_vars(value_set, is_class_value),
|
||||
)
|
||||
return type_var_dict
|
||||
|
||||
|
||||
NO_VALUES = ValueSet([])
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from jedi._compatibility import unicode
|
||||
from jedi.inference.compiled.value import CompiledObject, CompiledName, \
|
||||
CompiledObjectFilter, CompiledValueName, create_from_access_path
|
||||
from jedi.inference.compiled.value import CompiledValue, CompiledName, \
|
||||
CompiledValueFilter, CompiledValueName, create_from_access_path
|
||||
from jedi.inference.base_value import LazyValueWrapper
|
||||
|
||||
|
||||
@@ -16,24 +16,28 @@ def builtin_from_name(inference_state, string):
|
||||
return value
|
||||
|
||||
|
||||
class CompiledValue(LazyValueWrapper):
|
||||
def __init__(self, compiled_obj):
|
||||
self.inference_state = compiled_obj.inference_state
|
||||
self._compiled_obj = compiled_obj
|
||||
class ExactValue(LazyValueWrapper):
|
||||
"""
|
||||
This class represents exact values, which make operations like additions
|
||||
and exact boolean values possible, while still being a "normal" stub.
|
||||
"""
|
||||
def __init__(self, compiled_value):
|
||||
self.inference_state = compiled_value.inference_state
|
||||
self._compiled_value = compiled_value
|
||||
|
||||
def __getattribute__(self, name):
|
||||
if name in ('get_safe_value', 'execute_operation', 'access_handle',
|
||||
'negate', 'py__bool__', 'is_compiled'):
|
||||
return getattr(self._compiled_obj, name)
|
||||
return super(CompiledValue, self).__getattribute__(name)
|
||||
return getattr(self._compiled_value, name)
|
||||
return super(ExactValue, self).__getattribute__(name)
|
||||
|
||||
def _get_wrapped_value(self):
|
||||
instance, = builtin_from_name(
|
||||
self.inference_state, self._compiled_obj.name.string_name).execute_with_values()
|
||||
self.inference_state, self._compiled_value.name.string_name).execute_with_values()
|
||||
return instance
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self._compiled_obj)
|
||||
return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)
|
||||
|
||||
|
||||
def create_simple_object(inference_state, obj):
|
||||
@@ -42,11 +46,11 @@ def create_simple_object(inference_state, obj):
|
||||
versions.
|
||||
"""
|
||||
assert type(obj) in (int, float, str, bytes, unicode, slice, complex, bool), obj
|
||||
compiled_obj = create_from_access_path(
|
||||
compiled_value = create_from_access_path(
|
||||
inference_state,
|
||||
inference_state.compiled_subprocess.create_simple_object(obj)
|
||||
)
|
||||
return CompiledValue(compiled_obj)
|
||||
return ExactValue(compiled_value)
|
||||
|
||||
|
||||
def get_string_value_set(inference_state):
|
||||
|
||||
@@ -113,34 +113,6 @@ def shorten_repr(func):
|
||||
return wrapper
|
||||
|
||||
|
||||
def compiled_objects_cache(attribute_name):
|
||||
def decorator(func):
|
||||
"""
|
||||
This decorator caches just the ids, oopposed to caching the object itself.
|
||||
Caching the id has the advantage that an object doesn't need to be
|
||||
hashable.
|
||||
"""
|
||||
def wrapper(inference_state, obj, parent_context=None):
|
||||
cache = getattr(inference_state, attribute_name)
|
||||
# Do a very cheap form of caching here.
|
||||
key = id(obj)
|
||||
try:
|
||||
cache[key]
|
||||
return cache[key][0]
|
||||
except KeyError:
|
||||
# TODO wuaaaarrghhhhhhhh
|
||||
if attribute_name == 'mixed_cache':
|
||||
result = func(inference_state, obj, parent_context)
|
||||
else:
|
||||
result = func(inference_state, obj)
|
||||
# Need to cache all of them, otherwise the id could be overwritten.
|
||||
cache[key] = result, obj, parent_context
|
||||
return result
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def create_access(inference_state, obj):
|
||||
return inference_state.compiled_subprocess.get_or_create_access_handle(obj)
|
||||
|
||||
@@ -315,6 +287,9 @@ class DirectObjectAccess(object):
|
||||
def is_class(self):
|
||||
return inspect.isclass(self._obj)
|
||||
|
||||
def is_function(self):
|
||||
return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)
|
||||
|
||||
def is_module(self):
|
||||
return inspect.ismodule(self._obj)
|
||||
|
||||
@@ -509,6 +484,11 @@ class DirectObjectAccess(object):
|
||||
def needs_type_completions(self):
|
||||
return inspect.isclass(self._obj) and self._obj != type
|
||||
|
||||
def _annotation_to_str(self, annotation):
|
||||
if isinstance(annotation, type):
|
||||
return str(annotation.__name__)
|
||||
return str(annotation)
|
||||
|
||||
def get_signature_params(self):
|
||||
return [
|
||||
SignatureParam(
|
||||
@@ -518,7 +498,7 @@ class DirectObjectAccess(object):
|
||||
default_string=repr(p.default),
|
||||
has_annotation=p.annotation is not p.empty,
|
||||
annotation=self._create_access_path(p.annotation),
|
||||
annotation_string=str(p.annotation),
|
||||
annotation_string=self._annotation_to_str(p.annotation),
|
||||
kind_name=str(p.kind)
|
||||
) for p in self._get_signature().parameters.values()
|
||||
]
|
||||
@@ -527,22 +507,6 @@ class DirectObjectAccess(object):
|
||||
obj = self._obj
|
||||
if py_version < 33:
|
||||
raise ValueError("inspect.signature was introduced in 3.3")
|
||||
if py_version == 34:
|
||||
# In 3.4 inspect.signature are wrong for str and int. This has
|
||||
# been fixed in 3.5. The signature of object is returned,
|
||||
# because no signature was found for str. Here we imitate 3.5
|
||||
# logic and just ignore the signature if the magic methods
|
||||
# don't match object.
|
||||
# 3.3 doesn't even have the logic and returns nothing for str
|
||||
# and classes that inherit from object.
|
||||
user_def = inspect._signature_get_user_defined_method
|
||||
if (inspect.isclass(obj)
|
||||
and not user_def(type(obj), '__init__')
|
||||
and not user_def(type(obj), '__new__')
|
||||
and (obj.__init__ != object.__init__
|
||||
or obj.__new__ != object.__new__)):
|
||||
raise ValueError
|
||||
|
||||
try:
|
||||
return inspect.signature(obj)
|
||||
except (RuntimeError, TypeError):
|
||||
@@ -595,4 +559,6 @@ def _is_class_instance(obj):
|
||||
except AttributeError:
|
||||
return False
|
||||
else:
|
||||
return cls != type and not issubclass(cls, NOT_CLASS_TYPES)
|
||||
# The isinstance check for cls is just there so issubclass doesn't
|
||||
# raise an exception.
|
||||
return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
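The extra ``isinstance(cls, type)`` guard matters because ``issubclass`` raises ``TypeError`` when its first argument is not a class; a small standalone illustration (independent of the exact setup in the reported issue)::

    class Weird(object):
        @property
        def __class__(self):
            # Report something that is not a class at all.
            return 42

    cls = Weird().__class__  # -> 42
    # Without the isinstance() guard, ``issubclass(cls, ...)`` would raise
    # ``TypeError: issubclass() arg 1 must be a class``.
    print(isinstance(cls, type))  # False, so the guarded check short-circuits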
|
||||
|
||||
@@ -6,6 +6,7 @@ information returned to enable Jedi to make decisions.
|
||||
|
||||
import types
|
||||
|
||||
from jedi import debug
|
||||
from jedi._compatibility import py_version
|
||||
|
||||
_sentinel = object()
|
||||
@@ -54,7 +55,14 @@ def _shadowed_dict_newstyle(klass):
|
||||
|
||||
|
||||
def _static_getmro_newstyle(klass):
|
||||
return type.__dict__['__mro__'].__get__(klass)
|
||||
mro = type.__dict__['__mro__'].__get__(klass)
|
||||
if not isinstance(mro, (tuple, list)):
|
||||
# There are unfortunately no tests for this, I was not able to
|
||||
# reproduce this in pure Python. However, this should still solve the issue
|
||||
# raised in GH #1517.
|
||||
debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))
|
||||
return ()
|
||||
return mro
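For reference, the descriptor access used here reads the MRO through the slot on ``type`` and therefore cannot be shadowed by a class attribute; a short sketch::

    class A(object):
        pass

    # Equivalent to the lookup above; returns the real MRO tuple.
    static_mro = type.__dict__['__mro__'].__get__(A)
    print(static_mro)                      # (<class '__main__.A'>, <class 'object'>)
    print(isinstance(static_mro, tuple))   # True, so it is returned unchanged above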
|
||||
|
||||
|
||||
if py_version >= 30:
|
||||
|
||||
@@ -10,17 +10,15 @@ from jedi.parser_utils import get_cached_code_lines
|
||||
|
||||
from jedi._compatibility import unwrap
|
||||
from jedi import settings
|
||||
from jedi.cache import memoize_method
|
||||
from jedi.inference import compiled
|
||||
from jedi.cache import underscore_memoization
|
||||
from jedi.file_io import FileIO
|
||||
from jedi.inference.names import NameWrapper
|
||||
from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES
|
||||
from jedi.inference.helpers import SimpleGetItemNotFound
|
||||
from jedi.inference.value import ModuleValue
|
||||
from jedi.inference.cache import inference_state_function_cache, \
|
||||
inference_state_method_cache
|
||||
from jedi.inference.compiled.access import compiled_objects_cache, \
|
||||
ALLOWED_GETITEM_TYPES, get_api_type
|
||||
from jedi.inference.compiled.value import create_cached_compiled_object
|
||||
from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type
|
||||
from jedi.inference.gradual.conversion import to_stub
|
||||
from jedi.inference.context import CompiledContext, CompiledModuleContext, \
|
||||
TreeContextMixin
|
||||
@@ -33,30 +31,31 @@ class MixedObject(ValueWrapper):
|
||||
A ``MixedObject`` is used in two ways:
|
||||
|
||||
1. It uses the default logic of ``parser.python.tree`` objects,
|
||||
2. except for getattr calls. The names dicts are generated in a fashion
|
||||
like ``CompiledObject``.
|
||||
2. except for getattr calls and signatures. The names dicts are generated
|
||||
in a fashion like ``CompiledValue``.
|
||||
|
||||
This combined logic makes it possible to provide more powerful REPL
|
||||
completion. It allows side effects that are not noticeable with the default
|
||||
parser structure to still be completable.
|
||||
|
||||
The biggest difference from CompiledObject to MixedObject is that we are
|
||||
The biggest difference from CompiledValue to MixedObject is that we are
|
||||
generally dealing with Python code and not with C code. This will generate
|
||||
fewer special cases, because in Python you don't have the same freedoms
|
||||
to modify the runtime.
|
||||
"""
|
||||
def __init__(self, compiled_object, tree_value):
|
||||
def __init__(self, compiled_value, tree_value):
|
||||
super(MixedObject, self).__init__(tree_value)
|
||||
self.compiled_object = compiled_object
|
||||
self.access_handle = compiled_object.access_handle
|
||||
self.compiled_value = compiled_value
|
||||
self.access_handle = compiled_value.access_handle
|
||||
|
||||
def get_filters(self, *args, **kwargs):
|
||||
yield MixedObjectFilter(self.inference_state, self)
|
||||
yield MixedObjectFilter(
|
||||
self.inference_state, self.compiled_value, self._wrapped_value)
|
||||
|
||||
def get_signatures(self):
|
||||
# Prefer `inspect.signature` over somehow analyzing Python code. It
|
||||
# should be very precise, especially for stuff like `partial`.
|
||||
return self.compiled_object.get_signatures()
|
||||
return self.compiled_value.get_signatures()
|
||||
|
||||
@inference_state_method_cache(default=NO_VALUES)
|
||||
def py__call__(self, arguments):
|
||||
@@ -68,15 +67,15 @@ class MixedObject(ValueWrapper):
|
||||
|
||||
def get_safe_value(self, default=_sentinel):
|
||||
if default is _sentinel:
|
||||
return self.compiled_object.get_safe_value()
|
||||
return self.compiled_value.get_safe_value()
|
||||
else:
|
||||
return self.compiled_object.get_safe_value(default)
|
||||
return self.compiled_value.get_safe_value(default)
|
||||
|
||||
def py__simple_getitem__(self, index):
|
||||
python_object = self.compiled_object.access_handle.access._obj
|
||||
python_object = self.compiled_value.access_handle.access._obj
|
||||
if type(python_object) in ALLOWED_GETITEM_TYPES:
|
||||
return self.compiled_object.py__simple_getitem__(index)
|
||||
raise SimpleGetItemNotFound
|
||||
return self.compiled_value.py__simple_getitem__(index)
|
||||
return self._wrapped_value.py__simple_getitem__(index)
|
||||
|
||||
def _as_context(self):
|
||||
if self.parent_context is None:
|
||||
@@ -84,26 +83,31 @@ class MixedObject(ValueWrapper):
|
||||
return MixedContext(self)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (
|
||||
return '<%s: %s; %s>' % (
|
||||
type(self).__name__,
|
||||
self.access_handle.get_repr()
|
||||
self.access_handle.get_repr(),
|
||||
self._wrapped_value,
|
||||
)
|
||||
|
||||
|
||||
class MixedContext(CompiledContext, TreeContextMixin):
|
||||
@property
|
||||
def compiled_object(self):
|
||||
return self._value.compiled_object
|
||||
def compiled_value(self):
|
||||
return self._value.compiled_value
|
||||
|
||||
|
||||
class MixedModuleContext(CompiledModuleContext, MixedContext):
|
||||
pass
|
||||
|
||||
|
||||
class MixedName(compiled.CompiledName):
|
||||
class MixedName(NameWrapper):
|
||||
"""
|
||||
The ``CompiledName._compiled_object`` is our MixedObject.
|
||||
The ``CompiledName._compiled_value`` is our MixedObject.
|
||||
"""
|
||||
def __init__(self, wrapped_name, parent_tree_value):
|
||||
super(MixedName, self).__init__(wrapped_name)
|
||||
self._parent_tree_value = parent_tree_value
|
||||
|
||||
@property
|
||||
def start_pos(self):
|
||||
values = list(self.infer())
|
||||
@@ -112,56 +116,39 @@ class MixedName(compiled.CompiledName):
|
||||
return 0, 0
|
||||
return values[0].name.start_pos
|
||||
|
||||
@underscore_memoization
|
||||
@memoize_method
|
||||
def infer(self):
|
||||
def access_to_value(parent_value, access):
|
||||
if parent_value is None:
|
||||
parent_context = None
|
||||
else:
|
||||
assert parent_value is not None
|
||||
parent_context = parent_value.as_context()
|
||||
compiled_value = self._wrapped_name.infer_compiled_value()
|
||||
tree_value = self._parent_tree_value
|
||||
if tree_value.is_instance() or tree_value.is_class():
|
||||
tree_values = tree_value.py__getattribute__(self.string_name)
|
||||
if compiled_value.is_function():
|
||||
return ValueSet({MixedObject(compiled_value, v) for v in tree_values})
|
||||
|
||||
if parent_context is None or isinstance(parent_context, MixedContext):
|
||||
return _create(self._inference_state, access, parent_context=parent_context)
|
||||
else:
|
||||
return ValueSet({
|
||||
create_cached_compiled_object(
|
||||
parent_context.inference_state, access, parent_context
|
||||
)
|
||||
})
|
||||
module_context = tree_value.get_root_context()
|
||||
return _create(self._inference_state, compiled_value, module_context)
|
||||
|
||||
# TODO use logic from compiled.CompiledObjectFilter
|
||||
access_paths = self._parent_value.access_handle.getattr_paths(
|
||||
self.string_name,
|
||||
default=None
|
||||
|
||||
class MixedObjectFilter(compiled.CompiledValueFilter):
|
||||
def __init__(self, inference_state, compiled_value, tree_value):
|
||||
super(MixedObjectFilter, self).__init__(inference_state, compiled_value)
|
||||
self._tree_value = tree_value
|
||||
|
||||
def _create_name(self, name):
|
||||
return MixedName(
|
||||
super(MixedObjectFilter, self)._create_name(name),
|
||||
self._tree_value,
|
||||
)
|
||||
assert len(access_paths)
|
||||
values = [None]
|
||||
for access in access_paths:
|
||||
values = ValueSet.from_sets(access_to_value(v, access) for v in values)
|
||||
return values
|
||||
|
||||
@property
|
||||
def api_type(self):
|
||||
return next(iter(self.infer())).api_type
|
||||
|
||||
|
||||
class MixedObjectFilter(compiled.CompiledObjectFilter):
|
||||
name_class = MixedName
|
||||
|
||||
|
||||
@inference_state_function_cache()
|
||||
def _load_module(inference_state, path):
|
||||
module_node = inference_state.parse(
|
||||
return inference_state.parse(
|
||||
path=path,
|
||||
cache=True,
|
||||
diff_cache=settings.fast_parser,
|
||||
cache_path=settings.cache_directory
|
||||
).get_root_node()
|
||||
# python_module = inspect.getmodule(python_object)
|
||||
# TODO we should actually make something like this possible.
|
||||
# inference_state.modules[python_module.__name__] = module_node
|
||||
return module_node
|
||||
|
||||
|
||||
def _get_object_to_check(python_object):
|
||||
@@ -262,60 +249,46 @@ def _find_syntax_node_name(inference_state, python_object):
|
||||
return module_node, tree_node, file_io, code_lines
|
||||
|
||||
|
||||
@compiled_objects_cache('mixed_cache')
|
||||
def _create(inference_state, access_handle, parent_context, *args):
|
||||
compiled_object = create_cached_compiled_object(
|
||||
inference_state,
|
||||
access_handle,
|
||||
# TODO It looks like we have to use the compiled object as a parent context.
|
||||
# Why is that?
|
||||
parent_context=None if parent_context is None
|
||||
else parent_context.compiled_object.as_context() # noqa
|
||||
)
|
||||
|
||||
@inference_state_function_cache()
|
||||
def _create(inference_state, compiled_value, module_context):
|
||||
# TODO accessing this is bad, but it probably doesn't matter that much,
|
||||
# because we're working with interpreters only here.
|
||||
python_object = access_handle.access._obj
|
||||
python_object = compiled_value.access_handle.access._obj
|
||||
result = _find_syntax_node_name(inference_state, python_object)
|
||||
if result is None:
|
||||
# TODO Care about generics from stuff like `[1]` and don't return like this.
|
||||
if type(python_object) in (dict, list, tuple):
|
||||
return ValueSet({compiled_object})
|
||||
return ValueSet({compiled_value})
|
||||
|
||||
tree_values = to_stub(compiled_object)
|
||||
tree_values = to_stub(compiled_value)
|
||||
if not tree_values:
|
||||
return ValueSet({compiled_object})
|
||||
return ValueSet({compiled_value})
|
||||
else:
|
||||
module_node, tree_node, file_io, code_lines = result
|
||||
|
||||
if parent_context is None:
|
||||
# TODO this __name__ is probably wrong.
|
||||
name = compiled_object.get_root_context().py__name__()
|
||||
if module_context is None or module_context.tree_node != module_node:
|
||||
root_compiled_value = compiled_value.get_root_context().get_value()
|
||||
# TODO this __name__ might be wrong.
|
||||
name = root_compiled_value.py__name__()
|
||||
string_names = tuple(name.split('.'))
|
||||
module_context = ModuleValue(
|
||||
module_value = ModuleValue(
|
||||
inference_state, module_node,
|
||||
file_io=file_io,
|
||||
string_names=string_names,
|
||||
code_lines=code_lines,
|
||||
is_package=compiled_object.is_package(),
|
||||
).as_context()
|
||||
is_package=root_compiled_value.is_package(),
|
||||
)
|
||||
if name is not None:
|
||||
inference_state.module_cache.add(string_names, ValueSet([module_context]))
|
||||
else:
|
||||
if parent_context.tree_node.get_root_node() != module_node:
|
||||
# This happens e.g. when __module__ is wrong, or when using
|
||||
# TypeVar('foo'), where Jedi uses 'foo' as the name and
|
||||
# Python's TypeVar('foo').__module__ will be typing.
|
||||
return ValueSet({compiled_object})
|
||||
module_context = parent_context.get_root_context()
|
||||
inference_state.module_cache.add(string_names, ValueSet([module_value]))
|
||||
module_context = module_value.as_context()
|
||||
|
||||
tree_values = ValueSet({module_context.create_value(tree_node)})
|
||||
if tree_node.type == 'classdef':
|
||||
if not access_handle.is_class():
|
||||
if not compiled_value.is_class():
|
||||
# Is an instance, not a class.
|
||||
tree_values = tree_values.execute_with_values()
|
||||
|
||||
return ValueSet(
|
||||
MixedObject(compiled_object, tree_value=tree_value)
|
||||
MixedObject(compiled_value, tree_value=tree_value)
|
||||
for tree_value in tree_values
|
||||
)
|
||||
|
||||
@@ -181,6 +181,15 @@ class CompiledSubprocess(object):
|
||||
os.path.dirname(os.path.dirname(parso_path)),
|
||||
'.'.join(str(x) for x in sys.version_info[:3]),
|
||||
)
|
||||
# Use an explicit environment to ensure reliable results (#1540)
|
||||
env = {}
|
||||
if os.name == 'nt':
|
||||
# if SYSTEMROOT (or case variant) exists in environment,
|
||||
# ensure it goes to subprocess
|
||||
for k, v in os.environ.items():
|
||||
if 'SYSTEMROOT' == k.upper():
|
||||
env.update({k: os.environ[k]})
|
||||
break # don't risk multiple entries
|
||||
process = GeneralizedPopen(
|
||||
args,
|
||||
stdin=subprocess.PIPE,
|
||||
@@ -188,7 +197,8 @@ class CompiledSubprocess(object):
|
||||
stderr=subprocess.PIPE,
|
||||
# Use system default buffering on Python 2 to improve performance
|
||||
# (this is already the case on Python 3).
|
||||
bufsize=-1
|
||||
bufsize=-1,
|
||||
env=env
|
||||
)
|
||||
self._stderr_queue = Queue()
|
||||
self._stderr_thread = t = Thread(
|
||||
@@ -297,7 +307,7 @@ class Listener(object):
|
||||
try:
|
||||
inference_state = self._inference_states[inference_state_id]
|
||||
except KeyError:
|
||||
from jedi.api.environment import InterpreterEnvironment
|
||||
from jedi import InterpreterEnvironment
|
||||
inference_state = InferenceState(
|
||||
# The project is not actually needed. Nothing should need to
|
||||
# access it.
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
from __future__ import print_function
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import inspect
|
||||
|
||||
from jedi._compatibility import find_module, cast_path, force_unicode, \
|
||||
iter_modules, all_suffixes
|
||||
all_suffixes, scandir
|
||||
from jedi.inference.compiled import access
|
||||
from jedi import debug
|
||||
from jedi import parser_utils
|
||||
|
||||
|
||||
@@ -40,13 +43,6 @@ def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs):
|
||||
sys.path = temp
|
||||
|
||||
|
||||
def list_module_names(inference_state, search_path):
|
||||
return [
|
||||
force_unicode(name)
|
||||
for module_loader, name, is_pkg in iter_modules(search_path)
|
||||
]
|
||||
|
||||
|
||||
def get_builtin_module_names(inference_state):
|
||||
return list(map(force_unicode, sys.builtin_module_names))
|
||||
|
||||
@@ -84,3 +80,36 @@ def _get_init_path(directory_path):
|
||||
|
||||
def safe_literal_eval(inference_state, value):
|
||||
return parser_utils.safe_literal_eval(value)
|
||||
|
||||
|
||||
def iter_module_names(*args, **kwargs):
|
||||
return list(_iter_module_names(*args, **kwargs))
|
||||
|
||||
|
||||
def _iter_module_names(inference_state, paths):
|
||||
# Python modules/packages
|
||||
for path in paths:
|
||||
try:
|
||||
dirs = scandir(path)
|
||||
except OSError:
|
||||
# The file might not exist or reading it might lead to an error.
|
||||
debug.warning("Not possible to list directory: %s", path)
|
||||
continue
|
||||
for dir_entry in dirs:
|
||||
name = dir_entry.name
|
||||
# First Namespaces then modules/stubs
|
||||
if dir_entry.is_dir():
|
||||
# pycache is obviously not an interesting namespace. Also the
|
||||
# name must be a valid identifier.
|
||||
# TODO use str.isidentifier, once Python 2 is removed
|
||||
if name != '__pycache__' and not re.search(r'\W|^\d', name):
|
||||
yield name
|
||||
else:
|
||||
if name.endswith('.pyi'): # Stub files
|
||||
modname = name[:-4]
|
||||
else:
|
||||
modname = inspect.getmodulename(name)
|
||||
|
||||
if modname and '.' not in modname:
|
||||
if modname != '__init__':
|
||||
yield modname
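A simplified standalone version of the loop above may make the intent clearer (a sketch: no error handling, no Python 2 compatibility; the directory path is whatever the caller passes in)::

    import inspect
    import os
    import re

    def list_module_names(path):
        names = []
        for entry in os.scandir(path):
            if entry.is_dir():
                # Packages/namespaces: skip __pycache__ and invalid identifiers.
                if entry.name != '__pycache__' and not re.search(r'\W|^\d', entry.name):
                    names.append(entry.name)
            elif entry.name.endswith('.pyi'):
                names.append(entry.name[:-4])  # stub files
            else:
                modname = inspect.getmodulename(entry.name)
                if modname and '.' not in modname and modname != '__init__':
                    names.append(modname)
        return names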
|
||||
|
||||
@@ -7,7 +7,7 @@ from functools import partial
|
||||
from jedi import debug
|
||||
from jedi.inference.utils import to_list
|
||||
from jedi._compatibility import force_unicode, Parameter, cast_path
|
||||
from jedi.cache import underscore_memoization, memoize_method
|
||||
from jedi.cache import memoize_method
|
||||
from jedi.inference.filters import AbstractFilter
|
||||
from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
|
||||
ParamNameInterface
|
||||
@@ -21,7 +21,7 @@ from jedi.inference.context import CompiledContext, CompiledModuleContext
|
||||
|
||||
|
||||
class CheckAttribute(object):
|
||||
"""Raises an AttributeError if the attribute X isn't available."""
|
||||
"""Raises :exc:`AttributeError` if the attribute X is not available."""
|
||||
def __init__(self, check_name=None):
|
||||
# Remove the py in front of e.g. py__call__.
|
||||
self.check_name = check_name
|
||||
@@ -41,9 +41,9 @@ class CheckAttribute(object):
|
||||
return partial(self.func, instance)
|
||||
|
||||
|
||||
class CompiledObject(Value):
|
||||
class CompiledValue(Value):
|
||||
def __init__(self, inference_state, access_handle, parent_context=None):
|
||||
super(CompiledObject, self).__init__(inference_state, parent_context)
|
||||
super(CompiledValue, self).__init__(inference_state, parent_context)
|
||||
self.access_handle = access_handle
|
||||
|
||||
def py__call__(self, arguments):
|
||||
@@ -58,7 +58,7 @@ class CompiledObject(Value):
|
||||
try:
|
||||
self.access_handle.getattr_paths(u'__call__')
|
||||
except AttributeError:
|
||||
return super(CompiledObject, self).py__call__(arguments)
|
||||
return super(CompiledValue, self).py__call__(arguments)
|
||||
else:
|
||||
if self.access_handle.is_class():
|
||||
from jedi.inference.value import CompiledInstance
|
||||
@@ -86,35 +86,18 @@ class CompiledObject(Value):
|
||||
for access in self.access_handle.py__bases__()
|
||||
)
|
||||
|
||||
def py__path__(self):
|
||||
paths = self.access_handle.py__path__()
|
||||
if paths is None:
|
||||
return None
|
||||
return map(cast_path, paths)
|
||||
|
||||
def is_package(self):
|
||||
return self.py__path__() is not None
|
||||
|
||||
@property
|
||||
def string_names(self):
|
||||
# For modules
|
||||
name = self.py__name__()
|
||||
if name is None:
|
||||
return ()
|
||||
return tuple(name.split('.'))
|
||||
|
||||
def get_qualified_names(self):
|
||||
return self.access_handle.get_qualified_names()
|
||||
|
||||
def py__bool__(self):
|
||||
return self.access_handle.py__bool__()
|
||||
|
||||
def py__file__(self):
|
||||
return cast_path(self.access_handle.py__file__())
|
||||
|
||||
def is_class(self):
|
||||
return self.access_handle.is_class()
|
||||
|
||||
def is_function(self):
|
||||
return self.access_handle.is_function()
|
||||
|
||||
def is_module(self):
|
||||
return self.access_handle.is_module()
|
||||
|
||||
@@ -156,7 +139,7 @@ class CompiledObject(Value):
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
|
||||
|
||||
@underscore_memoization
|
||||
@memoize_method
|
||||
def _parse_function_doc(self):
|
||||
doc = self.py__doc__()
|
||||
if doc is None:
|
||||
@@ -168,28 +151,19 @@ class CompiledObject(Value):
|
||||
def api_type(self):
|
||||
return self.access_handle.get_api_type()
|
||||
|
||||
@underscore_memoization
|
||||
def _cls(self):
|
||||
"""
|
||||
We used to limit the lookups for instantiated objects like list(), but
|
||||
this is not the case anymore. Python itself
|
||||
"""
|
||||
# Ensures that a CompiledObject is returned that is not an instance (like list)
|
||||
return self
|
||||
|
||||
def get_filters(self, is_instance=False, origin_scope=None):
|
||||
yield self._ensure_one_filter(is_instance)
|
||||
|
||||
@memoize_method
|
||||
def _ensure_one_filter(self, is_instance):
|
||||
return CompiledObjectFilter(self.inference_state, self, is_instance)
|
||||
return CompiledValueFilter(self.inference_state, self, is_instance)
|
||||
|
||||
def py__simple_getitem__(self, index):
|
||||
with reraise_getitem_errors(IndexError, KeyError, TypeError):
|
||||
try:
|
||||
access = self.access_handle.py__simple_getitem__(index)
|
||||
except AttributeError:
|
||||
return super(CompiledObject, self).py__simple_getitem__(index)
|
||||
return super(CompiledValue, self).py__simple_getitem__(index)
|
||||
if access is None:
|
||||
return NO_VALUES
|
||||
|
||||
@@ -200,7 +174,7 @@ class CompiledObject(Value):
|
||||
if all_access_paths is None:
|
||||
# This means basically that no __getitem__ has been defined on this
|
||||
# object.
|
||||
return super(CompiledObject, self).py__getitem__(index_value_set, contextualized_node)
|
||||
return super(CompiledValue, self).py__getitem__(index_value_set, contextualized_node)
|
||||
return ValueSet(
|
||||
create_from_access_path(self.inference_state, access)
|
||||
for access in all_access_paths
|
||||
@@ -212,7 +186,7 @@ class CompiledObject(Value):
|
||||
# just start with __getitem__(0). This is especially true for
|
||||
# Python 2 strings, where `str.__iter__` is not even defined.
|
||||
if not self.access_handle.has_iter():
|
||||
for x in super(CompiledObject, self).py__iter__(contextualized_node):
|
||||
for x in super(CompiledValue, self).py__iter__(contextualized_node):
|
||||
yield x
|
||||
|
||||
access_path_list = self.access_handle.py__iter__list()
|
||||
@@ -290,7 +264,7 @@ class CompiledObject(Value):
|
||||
v.with_generics(arguments)
|
||||
for v in self.inference_state.typing_module.py__getattribute__(name)
|
||||
]).execute_annotation()
|
||||
return super(CompiledObject, self).execute_annotation()
|
||||
return super(CompiledValue, self).execute_annotation()
|
||||
|
||||
def negate(self):
|
||||
return create_from_access_path(self.inference_state, self.access_handle.negate())
|
||||
@@ -298,11 +272,7 @@ class CompiledObject(Value):
|
||||
def get_metaclasses(self):
|
||||
return NO_VALUES
|
||||
|
||||
file_io = None # For modules
|
||||
|
||||
def _as_context(self):
|
||||
if self.parent_context is None:
|
||||
return CompiledModuleContext(self)
|
||||
return CompiledContext(self)
|
||||
|
||||
@property
|
||||
@@ -315,6 +285,38 @@ class CompiledObject(Value):
|
||||
for k in self.access_handle.get_key_paths()
|
||||
]
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
if self.access_handle.get_repr() in ('None', "<class 'NoneType'>"):
|
||||
return 'None'
|
||||
return None
|
||||
|
||||
|
||||
class CompiledModule(CompiledValue):
|
||||
file_io = None # For modules
|
||||
|
||||
def _as_context(self):
|
||||
return CompiledModuleContext(self)
|
||||
|
||||
def py__path__(self):
|
||||
paths = self.access_handle.py__path__()
|
||||
if paths is None:
|
||||
return None
|
||||
return map(cast_path, paths)
|
||||
|
||||
def is_package(self):
|
||||
return self.py__path__() is not None
|
||||
|
||||
@property
|
||||
def string_names(self):
|
||||
# For modules
|
||||
name = self.py__name__()
|
||||
if name is None:
|
||||
return ()
|
||||
return tuple(name.split('.'))
|
||||
|
||||
def py__file__(self):
|
||||
return cast_path(self.access_handle.py__file__())
|
||||
|
||||
|
||||
class CompiledName(AbstractNameDefinition):
|
||||
def __init__(self, inference_state, parent_value, name):
|
||||
@@ -355,16 +357,17 @@ class CompiledName(AbstractNameDefinition):
|
||||
return "instance"
|
||||
return next(iter(api)).api_type
|
||||
|
||||
@underscore_memoization
|
||||
@memoize_method
|
||||
def infer(self):
|
||||
return ValueSet([_create_from_name(
|
||||
self._inference_state, self._parent_value, self.string_name
|
||||
)])
|
||||
return ValueSet([self.infer_compiled_value()])
|
||||
|
||||
def infer_compiled_value(self):
|
||||
return create_from_name(self._inference_state, self._parent_value, self.string_name)
|
||||
|
||||
|
||||
class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
|
||||
def __init__(self, compiled_obj, signature_param):
|
||||
self.parent_context = compiled_obj.parent_context
|
||||
def __init__(self, compiled_value, signature_param):
|
||||
self.parent_context = compiled_value.parent_context
|
||||
self._signature_param = signature_param
|
||||
|
||||
@property
|
||||
@@ -395,8 +398,8 @@ class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
|
||||
|
||||
|
||||
class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
|
||||
def __init__(self, compiled_obj, name, default):
|
||||
self.parent_context = compiled_obj.parent_context
|
||||
def __init__(self, compiled_value, name, default):
|
||||
self.parent_context = compiled_value.parent_context
|
||||
self.string_name = name
|
||||
self._default = default
|
||||
|
||||
@@ -434,16 +437,14 @@ class EmptyCompiledName(AbstractNameDefinition):
|
||||
return NO_VALUES
|
||||
|
||||
|
||||
class CompiledObjectFilter(AbstractFilter):
|
||||
name_class = CompiledName
|
||||
|
||||
def __init__(self, inference_state, compiled_object, is_instance=False):
|
||||
class CompiledValueFilter(AbstractFilter):
|
||||
def __init__(self, inference_state, compiled_value, is_instance=False):
|
||||
self._inference_state = inference_state
|
||||
self.compiled_object = compiled_object
|
||||
self.compiled_value = compiled_value
|
||||
self.is_instance = is_instance
|
||||
|
||||
def get(self, name):
|
||||
access_handle = self.compiled_object.access_handle
|
||||
access_handle = self.compiled_value.access_handle
|
||||
return self._get(
|
||||
name,
|
||||
lambda name, unsafe: access_handle.is_allowed_getattr(name, unsafe),
|
||||
@@ -486,7 +487,7 @@ class CompiledObjectFilter(AbstractFilter):
|
||||
def values(self):
|
||||
from jedi.inference.compiled import builtin_from_name
|
||||
names = []
|
||||
needs_type_completions, dir_infos = self.compiled_object.access_handle.get_dir_infos()
|
||||
needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
|
||||
# We could use `unsafe` here as well, especially as a parameter to
|
||||
# get_dir_infos. But this would lead to a lot of property executions
|
||||
# that are probably not wanted. The drawback for this is that we
|
||||
@@ -506,14 +507,14 @@ class CompiledObjectFilter(AbstractFilter):
|
||||
return names
|
||||
|
||||
def _create_name(self, name):
|
||||
return self.name_class(
|
||||
return CompiledName(
|
||||
self._inference_state,
|
||||
self.compiled_object,
|
||||
self.compiled_value,
|
||||
name
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s: %s>" % (self.__class__.__name__, self.compiled_object)
|
||||
return "<%s: %s>" % (self.__class__.__name__, self.compiled_value)
|
||||
|
||||
|
||||
docstr_defaults = {
|
||||
@@ -586,15 +587,12 @@ def _parse_function_doc(doc):
|
||||
return param_str, ret
|
||||
|
||||
|
||||
def _create_from_name(inference_state, compiled_object, name):
|
||||
access_paths = compiled_object.access_handle.getattr_paths(name, default=None)
|
||||
parent_context = compiled_object
|
||||
if parent_context.is_class():
|
||||
parent_context = parent_context.parent_context
|
||||
def create_from_name(inference_state, compiled_value, name):
|
||||
access_paths = compiled_value.access_handle.getattr_paths(name, default=None)
|
||||
|
||||
value = None
|
||||
for access_path in access_paths:
|
||||
value = create_cached_compiled_object(
|
||||
value = create_cached_compiled_value(
|
||||
inference_state,
|
||||
access_path,
|
||||
parent_context=None if value is None else value.as_context(),
|
||||
@@ -612,7 +610,7 @@ def _normalize_create_args(func):
|
||||
def create_from_access_path(inference_state, access_path):
|
||||
value = None
|
||||
for name, access in access_path.accesses:
|
||||
value = create_cached_compiled_object(
|
||||
value = create_cached_compiled_value(
|
||||
inference_state,
|
||||
access,
|
||||
parent_context=None if value is None else value.as_context()
|
||||
@@ -622,6 +620,10 @@ def create_from_access_path(inference_state, access_path):
|
||||
|
||||
@_normalize_create_args
|
||||
@inference_state_function_cache()
|
||||
def create_cached_compiled_object(inference_state, access_handle, parent_context):
|
||||
assert not isinstance(parent_context, CompiledObject)
|
||||
return CompiledObject(inference_state, access_handle, parent_context)
|
||||
def create_cached_compiled_value(inference_state, access_handle, parent_context):
|
||||
assert not isinstance(parent_context, CompiledValue)
|
||||
if parent_context is None:
|
||||
cls = CompiledModule
|
||||
else:
|
||||
cls = CompiledValue
|
||||
return cls(inference_state, access_handle, parent_context)
|
||||
|
||||
@@ -129,6 +129,9 @@ class AbstractContext(object):
|
||||
def is_compiled(self):
|
||||
return False
|
||||
|
||||
def is_bound_method(self):
|
||||
return False
|
||||
|
||||
@abstractmethod
|
||||
def py__name__(self):
|
||||
raise NotImplementedError
|
||||
@@ -190,6 +193,9 @@ class ValueContext(AbstractContext):
|
||||
def is_compiled(self):
|
||||
return self._value.is_compiled()
|
||||
|
||||
def is_bound_method(self):
|
||||
return self._value.is_bound_method()
|
||||
|
||||
def py__name__(self):
|
||||
return self._value.py__name__()
|
||||
|
||||
@@ -308,7 +314,7 @@ class ModuleContext(TreeContextMixin, ValueContext):
|
||||
def get_filters(self, until_position=None, origin_scope=None):
|
||||
filters = self._value.get_filters(origin_scope)
|
||||
# Skip the first filter and replace it.
|
||||
next(filters)
|
||||
next(filters, None)
|
||||
yield MergedFilter(
|
||||
ParserTreeFilter(
|
||||
parent_context=self,
|
||||
@@ -347,6 +353,10 @@ class NamespaceContext(TreeContextMixin, ValueContext):
|
||||
def get_value(self):
|
||||
return self._value
|
||||
|
||||
@property
|
||||
def string_names(self):
|
||||
return self._value.string_names
|
||||
|
||||
def py__file__(self):
|
||||
return self._value.py__file__()
|
||||
|
||||
@@ -484,5 +494,7 @@ def get_global_filters(context, until_position, origin_scope):
|
||||
|
||||
context = context.parent_context
|
||||
|
||||
b = next(base_context.inference_state.builtins_module.get_filters(), None)
|
||||
assert b is not None
|
||||
# Add builtins to the global scope.
|
||||
yield next(base_context.inference_state.builtins_module.get_filters())
|
||||
yield b
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"""
|
||||
Docstrings are another source of information for functions and classes.
|
||||
:mod:`jedi.inference.dynamic` tries to find all executions of functions, while
|
||||
the docstring parsing is much easier. There are three different types of
|
||||
:mod:`jedi.inference.dynamic_params` tries to find all executions of functions,
|
||||
while the docstring parsing is much easier. There are three different types of
|
||||
docstrings that |jedi| understands:
|
||||
|
||||
- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
|
||||
@@ -23,7 +23,7 @@ from parso import parse, ParserSyntaxError
|
||||
|
||||
from jedi._compatibility import u
|
||||
from jedi import debug
|
||||
from jedi.inference.utils import indent_block
|
||||
from jedi.common.utils import indent_block
|
||||
from jedi.inference.cache import inference_state_method_cache
|
||||
from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
|
||||
NO_VALUES
|
||||
|
||||
@@ -48,7 +48,6 @@ def _avoid_recursions(func):
|
||||
finally:
|
||||
inf.dynamic_params_depth -= 1
|
||||
return NO_VALUES
|
||||
return
|
||||
return wrapper
|
||||
|
||||
|
||||
|
||||
@@ -77,22 +77,20 @@ class AbstractUsedNamesFilter(AbstractFilter):
|
||||
self._used_names = self._module_node.get_used_names()
|
||||
self.parent_context = parent_context
|
||||
|
||||
def get(self, name, **filter_kwargs):
|
||||
def get(self, name):
|
||||
return self._convert_names(self._filter(
|
||||
_get_definition_names(self._used_names, name),
|
||||
**filter_kwargs
|
||||
))
|
||||
|
||||
def _convert_names(self, names):
|
||||
return [self.name_class(self.parent_context, name) for name in names]
|
||||
|
||||
def values(self, **filter_kwargs):
|
||||
def values(self):
|
||||
return self._convert_names(
|
||||
name
|
||||
for name_key in self._used_names
|
||||
for name in self._filter(
|
||||
_get_definition_names(self._used_names, name_key),
|
||||
**filter_kwargs
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ from parso.python.tree import Name
|
||||
|
||||
from jedi import settings
|
||||
from jedi.inference.arguments import TreeArguments
|
||||
from jedi.inference import helpers
|
||||
from jedi.inference.value import iterable
|
||||
from jedi.inference.base_value import NO_VALUES
|
||||
from jedi.parser_utils import is_scope
|
||||
@@ -38,7 +37,17 @@ def filter_name(filters, name_or_str):
|
||||
if names:
|
||||
break
|
||||
|
||||
return list(names)
|
||||
return list(_remove_del_stmt(names))
|
||||
|
||||
|
||||
def _remove_del_stmt(names):
|
||||
# Catch del statements and remove them from results.
|
||||
for name in names:
|
||||
if name.tree_name is not None:
|
||||
definition = name.tree_name.get_definition()
|
||||
if definition is not None and definition.type == 'del_stmt':
|
||||
continue
|
||||
yield name
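The practical effect is that a ``del`` statement no longer shows up as a definition. A rough public-API illustration (the expected behaviour is assumed from this change, not verified here)::

    import jedi

    code = "x = 1\ndel x\nx"
    # Goto on the trailing ``x`` should now skip the ``del x`` line.
    print([d.line for d in jedi.Script(code).goto(line=3, column=0)])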
|
||||
|
||||
|
||||
def check_flow_information(value, flow, search_name, pos):
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
"""
|
||||
It is unfortunately not well documented how stubs and annotations work in Jedi.
|
||||
If somebody needs an introduction, please let me know.
|
||||
"""
|
||||
|
||||
@@ -14,7 +14,6 @@ from jedi.inference.cache import inference_state_method_cache
|
||||
from jedi.inference.base_value import ValueSet, NO_VALUES
|
||||
from jedi.inference.gradual.base import DefineGenericBase, GenericClass
|
||||
from jedi.inference.gradual.generics import TupleGenericManager
|
||||
from jedi.inference.gradual.typing import TypingClassValueWithIndex
|
||||
from jedi.inference.gradual.type_var import TypeVar
|
||||
from jedi.inference.helpers import is_string
|
||||
from jedi.inference.compiled import builtin_from_name
|
||||
@@ -111,7 +110,7 @@ def _split_comment_param_declaration(decl_text):
|
||||
@inference_state_method_cache()
|
||||
def infer_param(function_value, param, ignore_stars=False):
|
||||
values = _infer_param(function_value, param)
|
||||
if ignore_stars:
|
||||
if ignore_stars or not values:
|
||||
return values
|
||||
inference_state = function_value.inference_state
|
||||
if param.star_count == 1:
|
||||
@@ -119,7 +118,7 @@ def infer_param(function_value, param, ignore_stars=False):
|
||||
return ValueSet([GenericClass(
|
||||
tuple_,
|
||||
TupleGenericManager((values,)),
|
||||
) for c in values])
|
||||
)])
|
||||
elif param.star_count == 2:
|
||||
dct = builtin_from_name(inference_state, 'dict')
|
||||
generics = (
|
||||
@@ -129,8 +128,7 @@ def infer_param(function_value, param, ignore_stars=False):
|
||||
return ValueSet([GenericClass(
|
||||
dct,
|
||||
TupleGenericManager(generics),
|
||||
) for c in values])
|
||||
pass
|
||||
)])
|
||||
return values
|
||||
|
||||
|
||||
@@ -220,8 +218,6 @@ def infer_return_types(function, arguments):
|
||||
function.get_default_param_context(),
|
||||
match.group(1).strip()
|
||||
).execute_annotation()
|
||||
if annotation is None:
|
||||
return NO_VALUES
|
||||
|
||||
context = function.get_default_param_context()
|
||||
unknown_type_vars = find_unknown_type_vars(context, annotation)
|
||||
@@ -269,26 +265,25 @@ def infer_type_vars_for_execution(function, arguments, annotation_dict):
|
||||
elif kind is Parameter.VAR_KEYWORD:
|
||||
# TODO _dict_values is not public.
|
||||
actual_value_set = actual_value_set.try_merge('_dict_values')
|
||||
for ann in annotation_value_set:
|
||||
_merge_type_var_dicts(
|
||||
annotation_variable_results,
|
||||
_infer_type_vars(ann, actual_value_set),
|
||||
)
|
||||
merge_type_var_dicts(
|
||||
annotation_variable_results,
|
||||
annotation_value_set.infer_type_vars(actual_value_set),
|
||||
)
|
||||
return annotation_variable_results
|
||||
|
||||
|
||||
def infer_return_for_callable(arguments, param_values, result_values):
|
||||
result = NO_VALUES
|
||||
all_type_vars = {}
|
||||
for pv in param_values:
|
||||
if pv.array_type == 'list':
|
||||
type_var_dict = infer_type_vars_for_callable(arguments, pv.py__iter__())
|
||||
all_type_vars.update(type_var_dict)
|
||||
|
||||
result |= ValueSet.from_sets(
|
||||
v.define_generics(type_var_dict)
|
||||
if isinstance(v, (DefineGenericBase, TypeVar)) else ValueSet({v})
|
||||
for v in result_values
|
||||
).execute_annotation()
|
||||
return result
|
||||
return ValueSet.from_sets(
|
||||
v.define_generics(all_type_vars)
|
||||
if isinstance(v, (DefineGenericBase, TypeVar)) else ValueSet({v})
|
||||
for v in result_values
|
||||
).execute_annotation()
|
||||
|
||||
|
||||
def infer_type_vars_for_callable(arguments, lazy_params):
|
||||
@@ -302,15 +297,14 @@ def infer_type_vars_for_callable(arguments, lazy_params):
|
||||
callable_param_values = lazy_callable_param.infer()
|
||||
# Infer unknown type var
|
||||
actual_value_set = lazy_value.infer()
|
||||
for v in callable_param_values:
|
||||
_merge_type_var_dicts(
|
||||
annotation_variable_results,
|
||||
_infer_type_vars(v, actual_value_set),
|
||||
)
|
||||
merge_type_var_dicts(
|
||||
annotation_variable_results,
|
||||
callable_param_values.infer_type_vars(actual_value_set),
|
||||
)
|
||||
return annotation_variable_results
|
||||
|
||||
|
||||
def _merge_type_var_dicts(base_dict, new_dict):
|
||||
def merge_type_var_dicts(base_dict, new_dict):
|
||||
for type_var_name, values in new_dict.items():
|
||||
if values:
|
||||
try:
|
||||
@@ -319,88 +313,60 @@ def _merge_type_var_dicts(base_dict, new_dict):
            base_dict[type_var_name] = values


def _infer_type_vars(annotation_value, value_set, is_class_value=False):
def merge_pairwise_generics(annotation_value, annotated_argument_class):
    """
    This function tries to find information about undefined type vars and
    returns a dict from type var name to value set.
    Match up the generic parameters from the given argument class to the
    target annotation.

    This is for example important to understand what `iter([1])` returns.
    According to typeshed, `iter` returns an `Iterator[_T]`:
    This walks the generic parameters immediately within the annotation and
    argument's type, in order to determine the concrete values of the
    annotation's parameters for the current case.

        def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
    For example, given the following code:

    This function would generate `int` for `_T` in this case, because it
    unpacks the `Iterable`.
        def values(mapping: Mapping[K, V]) -> List[V]: ...

        for val in values({1: 'a'}):
            val

    Then this function should be given representations of `Mapping[K, V]`
    and `Mapping[int, str]`, so that it can determine that `K` is `int` and
    `V` is `str`.

    Note that it is the responsibility of the caller to traverse the MRO of the
    argument type as needed in order to find the type matching the
    annotation (in this case finding `Mapping[int, str]` as a parent of
    `Dict[int, str]`).

    Parameters
    ----------

    `annotation_value`: represents the annotation to infer the concrete
        parameter types of.

    `annotated_argument_class`: represents the annotated class of the
        argument being passed to the object annotated by `annotation_value`.
    """
||||
|
||||
type_var_dict = {}
|
||||
if isinstance(annotation_value, TypeVar):
|
||||
if not is_class_value:
|
||||
return {annotation_value.py__name__(): value_set.py__class__()}
|
||||
return {annotation_value.py__name__(): value_set}
|
||||
elif isinstance(annotation_value, TypingClassValueWithIndex):
|
||||
name = annotation_value.py__name__()
|
||||
if name == 'Type':
|
||||
given = annotation_value.get_generics()
|
||||
if given:
|
||||
for nested_annotation_value in given[0]:
|
||||
_merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
_infer_type_vars(
|
||||
nested_annotation_value,
|
||||
value_set,
|
||||
is_class_value=True,
|
||||
)
|
||||
)
|
||||
elif name == 'Callable':
|
||||
given = annotation_value.get_generics()
|
||||
if len(given) == 2:
|
||||
for nested_annotation_value in given[1]:
|
||||
_merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
_infer_type_vars(
|
||||
nested_annotation_value,
|
||||
value_set.execute_annotation(),
|
||||
)
|
||||
)
|
||||
elif isinstance(annotation_value, GenericClass):
|
||||
name = annotation_value.py__name__()
|
||||
if name == 'Iterable':
|
||||
given = annotation_value.get_generics()
|
||||
if given:
|
||||
for nested_annotation_value in given[0]:
|
||||
_merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
_infer_type_vars(
|
||||
nested_annotation_value,
|
||||
value_set.merge_types_of_iterate()
|
||||
)
|
||||
)
|
||||
elif name == 'Mapping':
|
||||
given = annotation_value.get_generics()
|
||||
if len(given) == 2:
|
||||
for value in value_set:
|
||||
try:
|
||||
method = value.get_mapping_item_values
|
||||
except AttributeError:
|
||||
continue
|
||||
key_values, value_values = method()
|
||||
|
||||
for nested_annotation_value in given[0]:
|
||||
_merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
_infer_type_vars(
|
||||
nested_annotation_value,
|
||||
key_values,
|
||||
)
|
||||
)
|
||||
for nested_annotation_value in given[1]:
|
||||
_merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
_infer_type_vars(
|
||||
nested_annotation_value,
|
||||
value_values,
|
||||
)
|
||||
)
|
||||
if not isinstance(annotated_argument_class, DefineGenericBase):
|
||||
return type_var_dict
|
||||
|
||||
annotation_generics = annotation_value.get_generics()
|
||||
actual_generics = annotated_argument_class.get_generics()
|
||||
|
||||
for annotation_generics_set, actual_generic_set in zip(annotation_generics, actual_generics):
|
||||
merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
annotation_generics_set.infer_type_vars(
|
||||
actual_generic_set,
|
||||
# This is a note to ourselves that we have already
|
||||
# converted the instance representation to its class.
|
||||
is_class_value=True,
|
||||
),
|
||||
)
|
||||
|
||||
return type_var_dict
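To make the pairwise matching above concrete, here is a minimal, self-contained sketch that performs the same zip-and-collect step on plain `typing` objects instead of Jedi's value sets (`pair_generics` is a hypothetical helper, not part of Jedi; requires Python 3.8+ for `typing.get_args`):
```
from typing import Mapping, TypeVar, get_args

K = TypeVar('K')
V = TypeVar('V')

def pair_generics(annotation, argument_type):
    # Zip the annotation's type parameters with the concrete parameters of
    # the argument's (already MRO-matched) type and collect the TypeVars.
    result = {}
    for param, concrete in zip(get_args(annotation), get_args(argument_type)):
        if isinstance(param, TypeVar):
            result[param.__name__] = concrete
    return result

print(pair_generics(Mapping[K, V], Mapping[int, str]))
# {'K': <class 'int'>, 'V': <class 'str'>}
```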
|
||||
|
||||
|
||||
|
@@ -165,6 +165,18 @@ class GenericClass(ClassMixin, DefineGenericBase):
    def _get_wrapped_value(self):
        return self._class_value

    def get_type_hint(self, add_class_info=True):
        n = self.py__name__()
        # Not sure if this is the best way to do this, but all of these types
        # are a bit special in that they have type aliases and other ways to
        # become lower case. It's probably better to make them upper case,
        # because that's what you can use in annotations.
        n = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(n, n)
        s = n + self._generics_manager.get_type_hint()
        if add_class_info:
            return 'Type[%s]' % s
        return s

    def get_type_var_filter(self):
        return _TypeVarFilter(self.get_generics(), self.list_type_vars())
||||
|
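A quick sketch of the name normalisation in `get_type_hint` above, with plain strings standing in for Jedi's class value and generics manager (`normalise` is a hypothetical helper):
```
def normalise(name, generics_hint, add_class_info=True):
    # list/dict/set/tuple have typing aliases; annotations use the upper-case form.
    name = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(name, name)
    s = name + generics_hint
    return 'Type[%s]' % s if add_class_info else s

print(normalise('list', '[int]'))         # Type[List[int]]
print(normalise('list', '[int]', False))  # List[int]
```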
@@ -188,6 +200,47 @@ class GenericClass(ClassMixin, DefineGenericBase):
                return True
        return self._class_value.is_sub_class_of(class_value)

    def infer_type_vars(self, value_set, is_class_value=False):
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts

        annotation_name = self.py__name__()
        type_var_dict = {}
        if annotation_name == 'Iterable' and not is_class_value:
            annotation_generics = self.get_generics()
            if annotation_generics:
                return annotation_generics[0].infer_type_vars(
                    value_set.merge_types_of_iterate(),
                )

        else:
            # Note: we need to handle the MRO _in order_, so we need to extract
            # the elements from the set first, then handle them, even if we put
            # them back in a set afterwards.
            for py_class in value_set:
                if not is_class_value:
                    if py_class.is_instance() and not py_class.is_compiled():
                        py_class = py_class.get_annotated_class_object()
                    else:
                        continue

                if py_class.api_type != u'class':
                    # Functions & modules don't have an MRO and we're not
                    # expecting a Callable (those are handled separately within
                    # TypingClassValueWithIndex).
                    continue

                for parent_class in py_class.py__mro__():
                    class_name = parent_class.py__name__()
                    if annotation_name == class_name:
                        merge_type_var_dicts(
                            type_var_dict,
                            merge_pairwise_generics(self, parent_class),
                        )
                        break

        return type_var_dict
||||
|
||||
|
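The MRO walk above can be illustrated with ordinary classes (an assumption-level sketch, not Jedi's value objects): the first base class whose name matches the annotation is the one whose generics get matched.
```
class Base: ...
class Mid(Base): ...
class Leaf(Mid): ...

def find_matching_parent(py_class, annotation_name):
    # Walk the MRO in order and stop at the first name match.
    for parent in py_class.__mro__:
        if parent.__name__ == annotation_name:
            return parent
    return None

print(find_matching_parent(Leaf, 'Base'))  # <class '__main__.Base'>
```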
||||
class _LazyGenericBaseClass(object):
|
||||
def __init__(self, class_value, lazy_base_class):
|
||||
@@ -239,6 +292,9 @@ class _GenericInstanceWrapper(ValueWrapper):
|
||||
return ValueSet([builtin_from_name(self.inference_state, u'None')])
|
||||
return self._wrapped_value.py__stop_iteration_returns()
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
return self._wrapped_value.class_value.get_type_hint(add_class_info=False)
|
||||
|
||||
|
||||
class _PseudoTreeNameClass(Value):
|
||||
"""
|
||||
@@ -285,6 +341,9 @@ class _PseudoTreeNameClass(Value):
|
||||
def name(self):
|
||||
return ValueName(self, self._tree_name)
|
||||
|
||||
def get_qualified_names(self):
|
||||
return (self._tree_name.value,)
|
||||
|
||||
def __repr__(self):
|
||||
return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ from jedi.inference.base_value import ValueSet, \
|
||||
NO_VALUES
|
||||
from jedi.inference.utils import to_list
|
||||
from jedi.inference.gradual.stub_value import StubModuleValue
|
||||
from jedi.inference.gradual.typeshed import try_to_load_stub_cached
|
||||
from jedi.inference.value.decorator import Decoratee
|
||||
|
||||
|
||||
def _stub_to_python_value_set(stub_value, ignore_compiled=False):
|
||||
@@ -10,6 +12,10 @@ def _stub_to_python_value_set(stub_value, ignore_compiled=False):
|
||||
if not stub_module_context.is_stub():
|
||||
return ValueSet([stub_value])
|
||||
|
||||
decorates = None
|
||||
if isinstance(stub_value, Decoratee):
|
||||
decorates = stub_value._original_value
|
||||
|
||||
was_instance = stub_value.is_instance()
|
||||
if was_instance:
|
||||
stub_value = stub_value.py__class__()
|
||||
@@ -36,6 +42,8 @@ def _stub_to_python_value_set(stub_value, ignore_compiled=False):
|
||||
# Now that the instance has been properly created, we can simply get
|
||||
# the method.
|
||||
values = values.py__getattribute__(method_name)
|
||||
if decorates is not None:
|
||||
values = ValueSet(Decoratee(v, decorates) for v in values)
|
||||
return values
|
||||
|
||||
|
||||
@@ -73,7 +81,13 @@ def _try_stub_to_python_names(names, prefer_stub_to_compiled=False):
|
||||
converted_names = converted.goto(name.get_public_name())
|
||||
if converted_names:
|
||||
for n in converted_names:
|
||||
yield n
|
||||
if n.get_root_context().is_stub():
|
||||
# If it's a stub again, it means we're going in
|
||||
# a circle. Probably some imports make it a
|
||||
# stub again.
|
||||
yield name
|
||||
else:
|
||||
yield n
|
||||
continue
|
||||
yield name
|
||||
|
||||
@@ -81,8 +95,7 @@ def _try_stub_to_python_names(names, prefer_stub_to_compiled=False):
|
||||
def _load_stub_module(module):
|
||||
if module.is_stub():
|
||||
return module
|
||||
from jedi.inference.gradual.typeshed import _try_to_load_stub_cached
|
||||
return _try_to_load_stub_cached(
|
||||
return try_to_load_stub_cached(
|
||||
module.inference_state,
|
||||
import_names=module.string_names,
|
||||
python_value_set=ValueSet([module]),
|
||||
@@ -130,7 +143,9 @@ def _python_to_stub_names(names, fallback_to_python=False):
|
||||
|
||||
|
||||
def convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True):
|
||||
assert not (only_stubs and prefer_stubs)
|
||||
if only_stubs and prefer_stubs:
|
||||
raise ValueError("You cannot use both of only_stubs and prefer_stubs.")
|
||||
|
||||
with debug.increase_indent_cm('convert names'):
|
||||
if only_stubs or prefer_stubs:
|
||||
return _python_to_stub_names(names, fallback_to_python=prefer_stubs)
|
||||
|
||||
@@ -31,6 +31,9 @@ class _AbstractGenericManager(object):
|
||||
debug.warning('No param #%s found for annotation %s', index, self)
|
||||
return NO_VALUES
|
||||
|
||||
def get_type_hint(self):
|
||||
return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())
|
||||
|
||||
|
||||
class LazyGenericManager(_AbstractGenericManager):
|
||||
def __init__(self, context_of_index, index_value):
|
||||
|
||||
@@ -41,7 +41,7 @@ class StubModuleValue(ModuleValue):
|
||||
|
||||
def get_filters(self, origin_scope=None):
|
||||
filters = super(StubModuleValue, self).get_filters(origin_scope)
|
||||
next(filters) # Ignore the first filter and replace it with our own
|
||||
next(filters, None) # Ignore the first filter and replace it with our own
|
||||
stub_filters = self._get_stub_filters(origin_scope=origin_scope)
|
||||
for f in stub_filters:
|
||||
yield f
|
||||
@@ -63,7 +63,9 @@ class StubModuleContext(ModuleContext):
|
||||
class TypingModuleWrapper(StubModuleValue):
|
||||
def get_filters(self, *args, **kwargs):
|
||||
filters = super(TypingModuleWrapper, self).get_filters(*args, **kwargs)
|
||||
yield TypingModuleFilterWrapper(next(filters))
|
||||
f = next(filters, None)
|
||||
assert f is not None
|
||||
yield TypingModuleFilterWrapper(f)
|
||||
for f in filters:
|
||||
yield f
|
||||
|
||||
@@ -74,7 +76,7 @@ class TypingModuleWrapper(StubModuleValue):
|
||||
class TypingModuleContext(ModuleContext):
|
||||
def get_filters(self, *args, **kwargs):
|
||||
filters = super(TypingModuleContext, self).get_filters(*args, **kwargs)
|
||||
yield TypingModuleFilterWrapper(next(filters))
|
||||
yield TypingModuleFilterWrapper(next(filters, None))
|
||||
for f in filters:
|
||||
yield f
|
||||
|
||||
|
||||
@@ -107,5 +107,11 @@ class TypeVar(BaseTypingValue):
|
||||
def execute_annotation(self):
|
||||
return self._get_classes().execute_annotation()
|
||||
|
||||
def infer_type_vars(self, value_set, is_class_value=False):
|
||||
annotation_name = self.py__name__()
|
||||
if not is_class_value:
|
||||
return {annotation_name: value_set.py__class__()}
|
||||
return {annotation_name: value_set}
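Put in plain-Python terms (a sketch, not Jedi's API): a bare `TypeVar` annotation binds its name to the class of the incoming value, unless both sides are already being compared as classes.
```
def bind_type_var(name, value, is_class_value=False):
    # Instances are converted to their class; classes are kept as-is.
    return {name: value if is_class_value else type(value)}

print(bind_type_var('_T', 42))         # {'_T': <class 'int'>}
print(bind_type_var('_T', int, True))  # {'_T': <class 'int'>}
```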
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
|
||||
|
||||
@@ -2,6 +2,7 @@ import os
|
||||
import re
|
||||
from functools import wraps
|
||||
|
||||
from jedi import settings
|
||||
from jedi.file_io import FileIO
|
||||
from jedi._compatibility import FileNotFoundError, cast_path
|
||||
from jedi.parser_utils import get_cached_code_lines
|
||||
@@ -102,9 +103,6 @@ def import_module_decorator(func):
|
||||
# ``os.path``, because it's a very important one in Python
|
||||
# that is being achieved by messing with ``sys.modules`` in
|
||||
# ``os``.
|
||||
python_parent = next(iter(parent_module_values))
|
||||
if python_parent is None:
|
||||
python_parent, = inference_state.import_module(('os',), prefer_stubs=False)
|
||||
python_value_set = ValueSet.from_sets(
|
||||
func(inference_state, (n,), None, sys_path,)
|
||||
for n in [u'posixpath', u'ntpath', u'macpath', u'os2emxpath']
|
||||
@@ -119,8 +117,8 @@ def import_module_decorator(func):
|
||||
if not prefer_stubs:
|
||||
return python_value_set
|
||||
|
||||
stub = _try_to_load_stub_cached(inference_state, import_names, python_value_set,
|
||||
parent_module_value, sys_path)
|
||||
stub = try_to_load_stub_cached(inference_state, import_names, python_value_set,
|
||||
parent_module_value, sys_path)
|
||||
if stub is not None:
|
||||
return ValueSet([stub])
|
||||
return python_value_set
|
||||
@@ -128,7 +126,10 @@ def import_module_decorator(func):
|
||||
return wrapper
|
||||
|
||||
|
||||
def _try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):
|
||||
def try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):
|
||||
if import_names is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
return inference_state.stub_module_cache[import_names]
|
||||
except KeyError:
|
||||
@@ -152,7 +153,7 @@ def _try_to_load_stub(inference_state, import_names, python_value_set,
|
||||
"""
|
||||
if parent_module_value is None and len(import_names) > 1:
|
||||
try:
|
||||
parent_module_value = _try_to_load_stub_cached(
|
||||
parent_module_value = try_to_load_stub_cached(
|
||||
inference_state, import_names[:-1], NO_VALUES,
|
||||
parent_module_value=None, sys_path=sys_path)
|
||||
except KeyError:
|
||||
@@ -255,11 +256,7 @@ def _load_from_typeshed(inference_state, python_value_set, parent_module_value,
|
||||
|
||||
def _try_to_load_stub_from_file(inference_state, python_value_set, file_io, import_names):
|
||||
try:
|
||||
stub_module_node = inference_state.parse(
|
||||
file_io=file_io,
|
||||
cache=True,
|
||||
use_latest_grammar=True
|
||||
)
|
||||
stub_module_node = parse_stub_module(inference_state, file_io)
|
||||
except (OSError, IOError): # IOError is Python 2 only
|
||||
# The file that you're looking for doesn't exist (anymore).
|
||||
return None
|
||||
@@ -270,6 +267,16 @@ def _try_to_load_stub_from_file(inference_state, python_value_set, file_io, impo
|
||||
)
|
||||
|
||||
|
||||
def parse_stub_module(inference_state, file_io):
|
||||
return inference_state.parse(
|
||||
file_io=file_io,
|
||||
cache=True,
|
||||
diff_cache=settings.fast_parser,
|
||||
cache_path=settings.cache_directory,
|
||||
use_latest_grammar=True
|
||||
)
|
||||
|
||||
|
||||
def create_stub_module(inference_state, python_value_set, stub_module_node, file_io, import_names):
|
||||
if import_names == ('typing',):
|
||||
module_cls = TypingModuleWrapper
|
||||
|
||||
@@ -5,8 +5,11 @@ values.
|
||||
|
||||
This file deals with all the typing.py cases.
|
||||
"""
|
||||
import itertools
|
||||
|
||||
from jedi._compatibility import unicode
|
||||
from jedi import debug
|
||||
from jedi.inference.compiled import builtin_from_name
|
||||
from jedi.inference.compiled import builtin_from_name, create_simple_object
|
||||
from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
|
||||
LazyValueWrapper
|
||||
from jedi.inference.lazy_value import LazyKnownValues
|
||||
@@ -81,7 +84,8 @@ class TypingModuleName(NameWrapper):
|
||||
elif name == 'TypedDict':
|
||||
# TODO doesn't even exist in typeshed/typing.py, yet. But will be
|
||||
# added soon.
|
||||
pass
|
||||
yield TypedDictBase.create_cached(
|
||||
inference_state, self.parent_context, self.tree_name)
|
||||
elif name in ('no_type_check', 'no_type_check_decorator'):
|
||||
# This is not necessary, as long as we are not doing type checking.
|
||||
for c in self._wrapped_name.infer(): # Fuck my life Python 2
|
||||
@@ -180,7 +184,48 @@ class _TypingClassMixin(ClassMixin):
|
||||
|
||||
|
||||
class TypingClassValueWithIndex(_TypingClassMixin, TypingValueWithIndex):
|
||||
pass
|
||||
def infer_type_vars(self, value_set, is_class_value=False):
|
||||
# Circular
|
||||
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
|
||||
|
||||
type_var_dict = {}
|
||||
annotation_generics = self.get_generics()
|
||||
|
||||
if not annotation_generics:
|
||||
return type_var_dict
|
||||
|
||||
annotation_name = self.py__name__()
|
||||
if annotation_name == 'Type':
|
||||
if is_class_value:
|
||||
# This only applies if we are comparing something like
|
||||
# List[Type[int]] with Iterable[Type[int]]. First, Jedi tries to
|
||||
# match List/Iterable. After that we will land here, because
|
||||
# is_class_value will be True at that point. Obviously we also
|
||||
# compare below that both sides are `Type`.
|
||||
for element in value_set:
|
||||
element_name = element.py__name__()
|
||||
if element_name == 'Type':
|
||||
merge_type_var_dicts(
|
||||
type_var_dict,
|
||||
merge_pairwise_generics(self, element),
|
||||
)
|
||||
else:
|
||||
return annotation_generics[0].infer_type_vars(
|
||||
value_set,
|
||||
is_class_value=True,
|
||||
)
|
||||
|
||||
elif annotation_name == 'Callable':
|
||||
if len(annotation_generics) == 2:
|
||||
return annotation_generics[1].infer_type_vars(
|
||||
value_set.execute_annotation(),
|
||||
)
|
||||
|
||||
elif annotation_name == 'Tuple':
|
||||
tuple_annotation, = self.execute_annotation()
|
||||
return tuple_annotation.infer_type_vars(value_set, is_class_value)
|
||||
|
||||
return type_var_dict
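For the `Type[...]` branch above, the unwrap-one-level idea looks like this with plain `typing` objects (sketch only; requires Python 3.8+ for `typing.get_args`):
```
from typing import Type, TypeVar, get_args

T = TypeVar('T')

def unwrap_type(annotation):
    # Type[X] carries exactly one parameter; matching continues on X.
    inner, = get_args(annotation)
    return inner

print(unwrap_type(Type[T]))    # ~T
print(unwrap_type(Type[int]))  # <class 'int'>
```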
|
||||
|
||||
|
||||
class ProxyTypingClassValue(_TypingClassMixin, ProxyTypingValue):
|
||||
@@ -241,12 +286,7 @@ class Callable(BaseTypingValueWithGenerics):
|
||||
return infer_return_for_callable(arguments, param_values, result_values)
|
||||
|
||||
|
||||
class Tuple(LazyValueWrapper):
|
||||
def __init__(self, parent_context, name, generics_manager):
|
||||
self.inference_state = parent_context.inference_state
|
||||
self.parent_context = parent_context
|
||||
self._generics_manager = generics_manager
|
||||
|
||||
class Tuple(BaseTypingValueWithGenerics):
|
||||
def _is_homogenous(self):
|
||||
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
|
||||
# is used.
|
||||
@@ -282,6 +322,38 @@ class Tuple(LazyValueWrapper):
            .py__getattribute__('tuple').execute_annotation()
        return tuple_

    def infer_type_vars(self, value_set, is_class_value=False):
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
        from jedi.inference.gradual.base import GenericClass

        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like an iterable sequence
            # rather than a positional container of elements.
            return self.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )

        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.

            type_var_dict = {}
            for element in value_set:
                py_class = element.get_annotated_class_object()
                if not isinstance(py_class, GenericClass):
                    py_class = element

                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self, py_class),
                )

            return type_var_dict
||||
|
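The homogeneous/heterogeneous distinction used above can be shown with plain `typing` objects (a sketch; it assumes, as CPython does, that `Tuple[T, ...]` stores a literal `Ellipsis`; Python 3.8+ for `typing.get_args`):
```
from typing import Tuple, TypeVar, get_args

T = TypeVar('T')
U = TypeVar('U')

def is_homogeneous(tuple_annotation):
    args = get_args(tuple_annotation)
    return len(args) == 2 and args[1] is Ellipsis

print(is_homogeneous(Tuple[T, ...]))  # True  -> match elements against T only
print(is_homogeneous(Tuple[T, U]))    # False -> match each position pairwise
```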
||||
|
||||
class Generic(BaseTypingValueWithGenerics):
|
||||
pass
|
||||
@@ -339,3 +411,47 @@ class CastFunction(BaseTypingValue):
|
||||
@repack_with_argument_clinic('type, object, /')
|
||||
def py__call__(self, type_value_set, object_value_set):
|
||||
return type_value_set.execute_annotation()
|
||||
|
||||
|
||||
class TypedDictBase(BaseTypingValue):
|
||||
"""
|
||||
This class has no responsibilities and is just here to make sure that typed
|
||||
dicts can be identified.
|
||||
"""
|
||||
|
||||
|
||||
class TypedDict(LazyValueWrapper):
|
||||
"""Represents the instance version of ``TypedDictClass``."""
|
||||
def __init__(self, definition_class):
|
||||
self.inference_state = definition_class.inference_state
|
||||
self.parent_context = definition_class.parent_context
|
||||
self.tree_node = definition_class.tree_node
|
||||
self._definition_class = definition_class
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return ValueName(self, self.tree_node.name)
|
||||
|
||||
def py__simple_getitem__(self, index):
|
||||
if isinstance(index, unicode):
|
||||
return ValueSet.from_sets(
|
||||
name.infer()
|
||||
for filter in self._definition_class.get_filters(is_instance=True)
|
||||
for name in filter.get(index)
|
||||
)
|
||||
return NO_VALUES
|
||||
|
||||
def get_key_values(self):
|
||||
filtered_values = itertools.chain.from_iterable((
|
||||
f.values()
|
||||
for f in self._definition_class.get_filters(is_instance=True)
|
||||
))
|
||||
return ValueSet({
|
||||
create_simple_object(self.inference_state, v.string_name)
|
||||
for v in filtered_values
|
||||
})
|
||||
|
||||
def _get_wrapped_value(self):
|
||||
d, = self.inference_state.builtins_module.py__getattribute__('dict')
|
||||
result, = d.execute_with_values()
|
||||
return result
|
||||
|
||||
@@ -8,7 +8,6 @@ from contextlib import contextmanager
|
||||
from parso.python import tree
|
||||
|
||||
from jedi._compatibility import unicode
|
||||
from jedi.parser_utils import get_parent_scope
|
||||
|
||||
|
||||
def is_stdlib_path(path):
|
||||
@@ -122,29 +121,6 @@ def get_names_of_node(node):
|
||||
return list(chain.from_iterable(get_names_of_node(c) for c in children))
|
||||
|
||||
|
||||
def get_module_names(module, all_scopes):
|
||||
"""
|
||||
Returns a dictionary with name parts as keys and their call paths as
|
||||
values.
|
||||
"""
|
||||
names = list(chain.from_iterable(module.get_used_names().values()))
|
||||
if not all_scopes:
|
||||
# We have to filter all the names that don't have the module as a
|
||||
# parent_scope. There's None as a parent, because nodes in the module
|
||||
# node have the parent module and not suite as all the others.
|
||||
# Therefore it's important to catch that case.
|
||||
|
||||
def is_module_scope_name(name):
|
||||
parent_scope = get_parent_scope(name)
|
||||
# async functions have an extra wrapper. Strip it.
|
||||
if parent_scope and parent_scope.type == 'async_stmt':
|
||||
parent_scope = parent_scope.parent
|
||||
return parent_scope in (module, None)
|
||||
|
||||
names = [n for n in names if is_module_scope_name(n)]
|
||||
return names
|
||||
|
||||
|
||||
def is_string(value):
|
||||
if value.inference_state.environment.version_info.major == 2:
|
||||
str_classes = (unicode, bytes)
|
||||
|
||||
@@ -16,9 +16,10 @@ import os
|
||||
from parso.python import tree
|
||||
from parso.tree import search_ancestor
|
||||
|
||||
from jedi._compatibility import ImplicitNSInfo, force_unicode
|
||||
from jedi._compatibility import ImplicitNSInfo, force_unicode, FileNotFoundError
|
||||
from jedi import debug
|
||||
from jedi import settings
|
||||
from jedi.file_io import FolderIO
|
||||
from jedi.parser_utils import get_cached_code_lines
|
||||
from jedi.inference import sys_path
|
||||
from jedi.inference import helpers
|
||||
@@ -28,8 +29,8 @@ from jedi.inference.utils import unite
|
||||
from jedi.inference.cache import inference_state_method_cache
|
||||
from jedi.inference.names import ImportName, SubModuleName
|
||||
from jedi.inference.base_value import ValueSet, NO_VALUES
|
||||
from jedi.inference.gradual.typeshed import import_module_decorator
|
||||
from jedi.inference.value.module import iter_module_names
|
||||
from jedi.inference.gradual.typeshed import import_module_decorator, \
|
||||
create_stub_module, parse_stub_module
|
||||
from jedi.plugins import plugin_manager
|
||||
|
||||
|
||||
@@ -191,18 +192,19 @@ class Importer(object):
|
||||
import_path = base + tuple(import_path)
|
||||
else:
|
||||
path = module_context.py__file__()
|
||||
project_path = self._inference_state.project._path
|
||||
import_path = list(import_path)
|
||||
if path is None:
|
||||
# If no path is defined, our best guess is that the current
|
||||
# file is edited by a user on the current working
|
||||
# directory. We need to add an initial path, because it
|
||||
# will get removed as the name of the current file.
|
||||
directory = os.getcwd()
|
||||
directory = project_path
|
||||
else:
|
||||
directory = os.path.dirname(path)
|
||||
|
||||
base_import_path, base_directory = _level_to_base_import_path(
|
||||
self._inference_state.project._path, directory, level,
|
||||
project_path, directory, level,
|
||||
)
|
||||
if base_directory is None:
|
||||
# Everything is lost, the relative import does point
|
||||
@@ -264,24 +266,15 @@ class Importer(object):
|
||||
Get the names of all modules in the search_path. This means file names
|
||||
and not names defined in the files.
|
||||
"""
|
||||
names = []
|
||||
# add builtin module names
|
||||
if search_path is None and in_module is None:
|
||||
names += [
|
||||
ImportName(self._module_context, name)
|
||||
for name in self._inference_state.compiled_subprocess.get_builtin_module_names()
|
||||
]
|
||||
|
||||
if search_path is None:
|
||||
search_path = self._sys_path_with_modifications(is_completion=True)
|
||||
|
||||
for name in iter_module_names(self._inference_state, search_path):
|
||||
if in_module is None:
|
||||
n = ImportName(self._module_context, name)
|
||||
else:
|
||||
n = SubModuleName(in_module.as_context(), name)
|
||||
names.append(n)
|
||||
return names
|
||||
sys_path = self._sys_path_with_modifications(is_completion=True)
|
||||
else:
|
||||
sys_path = search_path
|
||||
return list(iter_module_names(
|
||||
self._inference_state, self._module_context, sys_path,
|
||||
module_cls=ImportName if in_module is None else SubModuleName,
|
||||
add_builtin_modules=search_path is None and in_module is None,
|
||||
))
|
||||
|
||||
def completion_names(self, inference_state, only_modules=False):
|
||||
"""
|
||||
@@ -440,7 +433,7 @@ def _load_python_module(inference_state, file_io,
|
||||
file_io=file_io,
|
||||
cache=True,
|
||||
diff_cache=settings.fast_parser,
|
||||
cache_path=settings.cache_directory
|
||||
cache_path=settings.cache_directory,
|
||||
)
|
||||
|
||||
from jedi.inference.value import ModuleValue
|
||||
@@ -454,8 +447,12 @@ def _load_python_module(inference_state, file_io,
|
||||
|
||||
|
||||
def _load_builtin_module(inference_state, import_names=None, sys_path=None):
|
||||
project = inference_state.project
|
||||
if sys_path is None:
|
||||
sys_path = inference_state.get_sys_path()
|
||||
if not project._load_unsafe_extensions:
|
||||
safe_paths = project._get_base_sys_path(inference_state)
|
||||
sys_path = [p for p in sys_path if p in safe_paths]
|
||||
|
||||
dotted_name = '.'.join(import_names)
|
||||
assert dotted_name is not None
|
||||
@@ -467,32 +464,59 @@ def _load_builtin_module(inference_state, import_names=None, sys_path=None):
|
||||
return module
|
||||
|
||||
|
||||
def load_module_from_path(inference_state, file_io, base_names=None):
|
||||
def load_module_from_path(inference_state, file_io, import_names=None, is_package=None):
|
||||
"""
|
||||
This should pretty much only be used for get_modules_containing_name. It's
|
||||
here to ensure that a random path is still properly loaded into the Jedi
|
||||
module structure.
|
||||
"""
|
||||
path = file_io.path
|
||||
if base_names:
|
||||
module_name = os.path.basename(path)
|
||||
module_name = sys_path.remove_python_path_suffix(module_name)
|
||||
is_package = module_name == '__init__'
|
||||
if is_package:
|
||||
import_names = base_names
|
||||
else:
|
||||
import_names = base_names + (module_name,)
|
||||
else:
|
||||
if import_names is None:
|
||||
e_sys_path = inference_state.get_sys_path()
|
||||
import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)
|
||||
else:
|
||||
assert isinstance(is_package, bool)
|
||||
|
||||
module = _load_python_module(
|
||||
inference_state, file_io,
|
||||
import_names=import_names,
|
||||
is_package=is_package,
|
||||
is_stub = file_io.path.endswith('.pyi')
|
||||
if is_stub:
|
||||
folder_io = file_io.get_parent_folder()
|
||||
if folder_io.path.endswith('-stubs'):
|
||||
folder_io = FolderIO(folder_io.path[:-6])
|
||||
if file_io.path.endswith('__init__.pyi'):
|
||||
python_file_io = folder_io.get_file_io('__init__.py')
|
||||
else:
|
||||
python_file_io = folder_io.get_file_io(import_names[-1] + '.py')
|
||||
|
||||
try:
|
||||
v = load_module_from_path(
|
||||
inference_state, python_file_io,
|
||||
import_names, is_package=is_package
|
||||
)
|
||||
values = ValueSet([v])
|
||||
except FileNotFoundError:
|
||||
values = NO_VALUES
|
||||
|
||||
return create_stub_module(
|
||||
inference_state, values, parse_stub_module(inference_state, file_io),
|
||||
file_io, import_names
|
||||
)
|
||||
else:
|
||||
module = _load_python_module(
|
||||
inference_state, file_io,
|
||||
import_names=import_names,
|
||||
is_package=is_package,
|
||||
)
|
||||
inference_state.module_cache.add(import_names, ValueSet([module]))
|
||||
return module
|
||||
|
||||
|
||||
def load_namespace_from_path(inference_state, folder_io):
|
||||
import_names, is_package = sys_path.transform_path_to_dotted(
|
||||
inference_state.get_sys_path(),
|
||||
folder_io.path
|
||||
)
|
||||
inference_state.module_cache.add(import_names, ValueSet([module]))
|
||||
return module
|
||||
from jedi.inference.value.namespace import ImplicitNamespaceValue
|
||||
return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])
|
||||
|
||||
|
||||
def follow_error_node_imports_if_possible(context, name):
|
||||
@@ -522,3 +546,18 @@ def follow_error_node_imports_if_possible(context, name):
|
||||
return Importer(
|
||||
context.inference_state, names, context.get_root_context(), level).follow()
|
||||
return None
|
||||
|
||||
|
||||
def iter_module_names(inference_state, module_context, search_path,
|
||||
module_cls=ImportName, add_builtin_modules=True):
|
||||
"""
|
||||
Get the names of all modules in the search_path. This means file names
|
||||
and not names defined in the files.
|
||||
"""
|
||||
# add builtin module names
|
||||
if add_builtin_modules:
|
||||
for name in inference_state.compiled_subprocess.get_builtin_module_names():
|
||||
yield module_cls(module_context, name)
|
||||
|
||||
for name in inference_state.compiled_subprocess.iter_module_names(search_path):
|
||||
yield module_cls(module_context, name)
|
||||
|
||||
@@ -3,8 +3,10 @@ from jedi.common.utils import monkeypatch
|
||||
|
||||
|
||||
class AbstractLazyValue(object):
|
||||
def __init__(self, data):
|
||||
def __init__(self, data, min=1, max=1):
|
||||
self.data = data
|
||||
self.min = min
|
||||
self.max = max
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self.data)
|
||||
@@ -26,16 +28,16 @@ class LazyKnownValues(AbstractLazyValue):
|
||||
|
||||
|
||||
class LazyUnknownValue(AbstractLazyValue):
|
||||
def __init__(self):
|
||||
super(LazyUnknownValue, self).__init__(None)
|
||||
def __init__(self, min=1, max=1):
|
||||
super(LazyUnknownValue, self).__init__(None, min, max)
|
||||
|
||||
def infer(self):
|
||||
return NO_VALUES
|
||||
|
||||
|
||||
class LazyTreeValue(AbstractLazyValue):
|
||||
def __init__(self, context, node):
|
||||
super(LazyTreeValue, self).__init__(node)
|
||||
def __init__(self, context, node, min=1, max=1):
|
||||
super(LazyTreeValue, self).__init__(node, min, max)
|
||||
self.context = context
|
||||
# We need to save the predefined names. It's an unfortunate side effect
|
||||
# that needs to be tracked otherwise results will be wrong.
|
||||
|
||||
@@ -228,7 +228,13 @@ class ValueNameMixin(object):
|
||||
return ValueSet([self._value])
|
||||
|
||||
def py__doc__(self):
|
||||
return self._value.py__doc__()
|
||||
doc = self._value.py__doc__()
|
||||
if not doc and self._value.is_stub():
|
||||
from jedi.inference.gradual.conversion import convert_names
|
||||
names = convert_names([self], prefer_stub_to_compiled=False)
|
||||
if self not in names:
|
||||
return _merge_name_docs(names)
|
||||
return doc
|
||||
|
||||
def _get_qualified_names(self):
|
||||
return self._value.get_qualified_names()
|
||||
@@ -634,7 +640,7 @@ class StubName(StubNameMixin, TreeNameDefinition):
|
||||
inferred = super(StubName, self).infer()
|
||||
if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys':
|
||||
from jedi.inference.gradual.stub_value import VersionInfo
|
||||
return [VersionInfo(c) for c in inferred]
|
||||
return ValueSet(VersionInfo(c) for c in inferred)
|
||||
return inferred
|
||||
|
||||
|
||||
|
||||
@@ -51,6 +51,25 @@ class ExecutedParamName(ParamName):


def get_executed_param_names_and_issues(function_value, arguments):
    """
    Return a tuple of:
    - a list of `ExecutedParamName`s corresponding to the arguments of the
      function execution `function_value`, containing the inferred value of
      those arguments (whether explicit or default)
    - a list of the issues encountered while building that list

    For example, given:
    ```
    def foo(a, b, c=None, d='d'): ...

    foo(42, c='c')
    ```

    Then for the execution of `foo`, this will return a tuple containing:
    - a list with entries for each parameter a, b, c & d; the entries for a,
      c, & d will have their values (42, 'c' and 'd' respectively) included.
    - a list with a single entry about the lack of a value for `b`
    """
    def too_many_args(argument):
        m = _error_argument_count(funcdef, len(unpacked_va))
        # Just report an error for the first param that is not needed (like
||||
@@ -207,6 +226,23 @@ def get_executed_param_names_and_issues(function_value, arguments):


def get_executed_param_names(function_value, arguments):
    """
    Return a list of `ExecutedParamName`s corresponding to the arguments of the
    function execution `function_value`, containing the inferred value of those
    arguments (whether explicit or default). Any issues building this list (for
    example required arguments which are missing in the invocation) are ignored.

    For example, given:
    ```
    def foo(a, b, c=None, d='d'): ...

    foo(42, c='c')
    ```

    Then for the execution of `foo`, this will return a list containing entries
    for each parameter a, b, c & d; the entries for a, c, & d will have their
    values (42, 'c' and 'd' respectively) included.
    """
    return get_executed_param_names_and_issues(function_value, arguments)[0]
||||
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@ Recursions are the recipe of |jedi| to conquer Python code. However, someone
must stop recursions going mad. Some settings are here to make |jedi| stop at
the right time. You can read more about them :ref:`here <settings-recursion>`.

Next to :mod:`jedi.inference.cache` this module also makes |jedi| not
thread-safe. Why? ``execution_recursion_decorator`` uses class variables to
Next to the internal ``jedi.inference.cache`` this module also makes |jedi| not
thread-safe, because ``execution_recursion_decorator`` uses class variables to
count the function calls.

.. _settings-recursion:
@@ -34,7 +34,7 @@ from jedi.inference.base_value import NO_VALUES

recursion_limit = 15
"""
Like ``sys.getrecursionlimit()``, just for |jedi|.
Like :func:`sys.getrecursionlimit()`, just for |jedi|.
"""
total_function_execution_limit = 200
"""
||||
|
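A minimal sketch of the kind of counter these limits feed (not Jedi's actual decorator): once the budget is used up, further executions are refused.
```
def make_execution_guard(limit=200):
    count = 0
    def allowed():
        nonlocal count
        count += 1
        return count <= limit
    return allowed

allowed = make_execution_guard(limit=2)
print(allowed(), allowed(), allowed())  # True True False
```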
||||
@@ -3,9 +3,9 @@ import re
|
||||
|
||||
from parso import python_bytes_to_unicode
|
||||
|
||||
from jedi.debug import dbg
|
||||
from jedi.file_io import KnownContentFileIO
|
||||
from jedi.inference.imports import SubModuleName, load_module_from_path
|
||||
from jedi.inference.compiled import CompiledObject
|
||||
from jedi.inference.filters import ParserTreeFilter
|
||||
from jedi.inference.gradual.conversion import convert_names
|
||||
|
||||
@@ -38,8 +38,8 @@ def _resolve_names(definition_names, avoid_names=()):
|
||||
yield name
|
||||
|
||||
if name.api_type == 'module':
|
||||
for name in _resolve_names(name.goto(), definition_names):
|
||||
yield name
|
||||
for n in _resolve_names(name.goto(), definition_names):
|
||||
yield n
|
||||
|
||||
|
||||
def _dictionarize(names):
|
||||
@@ -109,8 +109,8 @@ def _find_global_variables(names, search_name):
|
||||
for global_name in method().get(search_name):
|
||||
yield global_name
|
||||
c = module_context.create_context(global_name.tree_name)
|
||||
for name in _add_names_in_same_context(c, global_name.string_name):
|
||||
yield name
|
||||
for n in _add_names_in_same_context(c, global_name.string_name):
|
||||
yield n
|
||||
|
||||
|
||||
def find_references(module_context, tree_name):
|
||||
@@ -170,7 +170,7 @@ def _check_fs(inference_state, file_io, regex):
|
||||
return None
|
||||
new_file_io = KnownContentFileIO(file_io.path, code)
|
||||
m = load_module_from_path(inference_state, new_file_io)
|
||||
if isinstance(m, CompiledObject):
|
||||
if m.is_compiled():
|
||||
return None
|
||||
return m.as_context()
|
||||
|
||||
@@ -193,14 +193,15 @@ def gitignored_lines(folder_io, file_io):
|
||||
return ignored_paths, ignored_names
|
||||
|
||||
|
||||
def _recurse_find_python_files(folder_io, except_paths):
|
||||
def recurse_find_python_folders_and_files(folder_io, except_paths=()):
|
||||
except_paths = set(except_paths)
|
||||
for root_folder_io, folder_ios, file_ios in folder_io.walk():
|
||||
# Delete folders that we don't want to iterate over.
|
||||
for file_io in file_ios:
|
||||
path = file_io.path
|
||||
if path.endswith('.py') or path.endswith('.pyi'):
|
||||
if path not in except_paths:
|
||||
yield file_io
|
||||
yield None, file_io
|
||||
|
||||
if path.endswith('.gitignore'):
|
||||
ignored_paths, ignored_names = \
|
||||
@@ -213,6 +214,14 @@ def _recurse_find_python_files(folder_io, except_paths):
|
||||
if folder_io.path not in except_paths
|
||||
and folder_io.get_base_name() not in _IGNORE_FOLDERS
|
||||
]
|
||||
for folder_io in folder_ios:
|
||||
yield folder_io, None
|
||||
|
||||
|
||||
def recurse_find_python_files(folder_io, except_paths=()):
|
||||
for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths):
|
||||
if file_io is not None:
|
||||
yield file_io
|
||||
|
||||
|
||||
def _find_python_files_in_sys_path(inference_state, module_contexts):
|
||||
@@ -229,7 +238,7 @@ def _find_python_files_in_sys_path(inference_state, module_contexts):
|
||||
path = folder_io.path
|
||||
if not any(path.startswith(p) for p in sys_path) or path in except_paths:
|
||||
break
|
||||
for file_io in _recurse_find_python_files(folder_io, except_paths):
|
||||
for file_io in recurse_find_python_files(folder_io, except_paths):
|
||||
if file_io.path not in yielded_paths:
|
||||
yield file_io
|
||||
except_paths.add(path)
|
||||
@@ -255,19 +264,28 @@ def get_module_contexts_containing_name(inference_state, module_contexts, name,
|
||||
if len(name) <= 2:
|
||||
return
|
||||
|
||||
file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts)
|
||||
for x in search_in_file_ios(inference_state, file_io_iterator, name,
|
||||
limit_reduction=limit_reduction):
|
||||
yield x # Python 2...
|
||||
|
||||
|
||||
def search_in_file_ios(inference_state, file_io_iterator, name, limit_reduction=1):
|
||||
parse_limit = _PARSED_FILE_LIMIT / limit_reduction
|
||||
open_limit = _OPENED_FILE_LIMIT / limit_reduction
|
||||
file_io_count = 0
|
||||
parsed_file_count = 0
|
||||
regex = re.compile(r'\b' + re.escape(name) + r'\b')
|
||||
for file_io in _find_python_files_in_sys_path(inference_state, module_contexts):
|
||||
for file_io in file_io_iterator:
|
||||
file_io_count += 1
|
||||
m = _check_fs(inference_state, file_io, regex)
|
||||
if m is not None:
|
||||
parsed_file_count += 1
|
||||
yield m
|
||||
if parsed_file_count >= parse_limit:
|
||||
dbg('Hit limit of parsed files: %s', parse_limit)
|
||||
break
|
||||
|
||||
if file_io_count >= open_limit:
|
||||
dbg('Hit limit of opened files: %s', open_limit)
|
||||
break
|
||||
|
||||
@@ -118,9 +118,10 @@ class TreeSignature(AbstractSignature):
|
||||
|
||||
|
||||
class BuiltinSignature(AbstractSignature):
|
||||
def __init__(self, value, return_string, is_bound=False):
|
||||
def __init__(self, value, return_string, function_value=None, is_bound=False):
|
||||
super(BuiltinSignature, self).__init__(value, is_bound)
|
||||
self._return_string = return_string
|
||||
self.__function_value = function_value
|
||||
|
||||
@property
|
||||
def annotation_string(self):
|
||||
@@ -128,10 +129,16 @@ class BuiltinSignature(AbstractSignature):
|
||||
|
||||
@property
|
||||
def _function_value(self):
|
||||
return self.value
|
||||
if self.__function_value is None:
|
||||
return self.value
|
||||
return self.__function_value
|
||||
|
||||
def bind(self, value):
|
||||
return BuiltinSignature(value, self._return_string, is_bound=True)
|
||||
return BuiltinSignature(
|
||||
value, self._return_string,
|
||||
function_value=self.value,
|
||||
is_bound=True
|
||||
)
|
||||
|
||||
|
||||
class SignatureWrapper(_SignatureMixin):
|
||||
|
||||
@@ -32,7 +32,7 @@ def _iter_nodes_for_param(param_name):
|
||||
argument = name.parent
|
||||
if argument.type == 'argument' \
|
||||
and argument.children[0] == '*' * param_name.star_count:
|
||||
# No support for Python <= 3.4 here, but they are end-of-life
|
||||
# No support for Python 2.7 here, but they are end-of-life
|
||||
# anyway
|
||||
trailer = search_ancestor(argument, 'trailer')
|
||||
if trailer is not None: # Make sure we're in a function
|
||||
|
||||
@@ -356,6 +356,12 @@ def infer_atom(context, atom):
|
||||
def infer_expr_stmt(context, stmt, seek_name=None):
|
||||
with recursion.execution_allowed(context.inference_state, stmt) as allowed:
|
||||
if allowed:
|
||||
if seek_name is not None:
|
||||
pep0484_values = \
|
||||
annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
|
||||
if pep0484_values:
|
||||
return pep0484_values
|
||||
|
||||
return _infer_expr_stmt(context, stmt, seek_name)
|
||||
return NO_VALUES
|
||||
|
||||
@@ -388,6 +394,7 @@ def _infer_expr_stmt(context, stmt, seek_name=None):
|
||||
|
||||
debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
|
||||
rhs = stmt.get_rhs()
|
||||
|
||||
value_set = context.infer_node(rhs)
|
||||
|
||||
if seek_name:
|
||||
@@ -596,7 +603,11 @@ def _infer_comparison_part(inference_state, context, left, operator, right):
|
||||
if str_operator in ('is', '!=', '==', 'is not'):
|
||||
operation = COMPARISON_OPERATORS[str_operator]
|
||||
bool_ = operation(left, right)
|
||||
return ValueSet([_bool_to_value(inference_state, bool_)])
|
||||
# Only if == returns True or != returns False, we can continue.
|
||||
# There's no guarantee that they are not equal. This can help
|
||||
# in some cases, but does not cover everything.
|
||||
if (str_operator in ('is', '==')) == bool_:
|
||||
return ValueSet([_bool_to_value(inference_state, bool_)])
|
||||
|
||||
if isinstance(left, VersionInfo):
|
||||
version_info = _get_tuple_ints(right)
|
||||
@@ -631,23 +642,6 @@ def _infer_comparison_part(inference_state, context, left, operator, right):
|
||||
return result
|
||||
|
||||
|
||||
def _remove_statements(context, stmt, name):
|
||||
"""
|
||||
This is the part where statements are being stripped.
|
||||
|
||||
Due to lazy type inference, statements like a = func; b = a; b() have to be
|
||||
inferred.
|
||||
|
||||
TODO merge with infer_expr_stmt?
|
||||
"""
|
||||
pep0484_values = \
|
||||
annotation.find_type_from_comment_hint_assign(context, stmt, name)
|
||||
if pep0484_values:
|
||||
return pep0484_values
|
||||
|
||||
return infer_expr_stmt(context, stmt, seek_name=name)
|
||||
|
||||
|
||||
@plugin_manager.decorate()
|
||||
def tree_name_to_values(inference_state, context, tree_name):
|
||||
value_set = NO_VALUES
|
||||
@@ -655,16 +649,18 @@ def tree_name_to_values(inference_state, context, tree_name):
|
||||
# First check for annotations, like: `foo: int = 3`
|
||||
if module_node is not None:
|
||||
names = module_node.get_used_names().get(tree_name.value, [])
|
||||
found_annotation = False
|
||||
for name in names:
|
||||
expr_stmt = name.parent
|
||||
|
||||
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
|
||||
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
|
||||
if correct_scope:
|
||||
found_annotation = True
|
||||
value_set |= annotation.infer_annotation(
|
||||
context, expr_stmt.children[1].children[1]
|
||||
).execute_annotation()
|
||||
if value_set:
|
||||
if found_annotation:
|
||||
return value_set
|
||||
|
||||
types = []
|
||||
@@ -710,7 +706,7 @@ def tree_name_to_values(inference_state, context, tree_name):
|
||||
n = TreeNameDefinition(context, tree_name)
|
||||
types = check_tuple_assignments(n, for_types)
|
||||
elif typ == 'expr_stmt':
|
||||
types = _remove_statements(context, node, tree_name)
|
||||
types = infer_expr_stmt(context, node, tree_name)
|
||||
elif typ == 'with_stmt':
|
||||
value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
|
||||
enter_methods = value_managers.py__getattribute__(u'__enter__')
|
||||
@@ -725,7 +721,9 @@ def tree_name_to_values(inference_state, context, tree_name):
|
||||
# the static analysis report.
|
||||
exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
|
||||
types = exceptions.execute_with_values()
|
||||
elif node.type == 'param':
|
||||
elif typ == 'param':
|
||||
types = NO_VALUES
|
||||
elif typ == 'del_stmt':
|
||||
types = NO_VALUES
|
||||
else:
|
||||
raise ValueError("Should not happen. type: %s" % typ)
|
||||
@@ -795,7 +793,8 @@ def check_tuple_assignments(name, value_set):
        if isinstance(index, slice):
            # For now star unpacking is not possible.
            return NO_VALUES
        for _ in range(index + 1):
        i = 0
        while i <= index:
            try:
                lazy_value = next(iterated)
            except StopIteration:
@@ -804,6 +803,8 @@ def check_tuple_assignments(name, value_set):
                # index number is high. Therefore break if the loop is
                # finished.
                return NO_VALUES
            else:
                i += lazy_value.max
        value_set = lazy_value.infer()
    return value_set
||||
|
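The `min`/`max` bookkeeping above exists because, in plain Python, a starred target can consume a variable number of iterated values, so advancing the counter by exactly one per target would point at the wrong element:
```
first, *middle, last = [1, 2, 3, 4, 5]
print(first, middle, last)  # 1 [2, 3, 4] 5  -> `middle` consumed three values
```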
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import os
|
||||
import re
|
||||
|
||||
from jedi._compatibility import unicode, force_unicode, all_suffixes
|
||||
from jedi.inference.cache import inference_state_method_cache
|
||||
@@ -10,6 +11,8 @@ from jedi.file_io import FileIO
|
||||
from jedi import settings
|
||||
from jedi import debug
|
||||
|
||||
_BUILDOUT_PATH_INSERTION_LIMIT = 10
|
||||
|
||||
|
||||
def _abs_path(module_context, path):
|
||||
if os.path.isabs(path):
|
||||
@@ -138,6 +141,8 @@ def discover_buildout_paths(inference_state, script_path):
|
||||
for buildout_script_path in _get_buildout_script_paths(script_path):
|
||||
for path in _get_paths_from_buildout_script(inference_state, buildout_script_path):
|
||||
buildout_script_paths.add(path)
|
||||
if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT:
|
||||
break
|
||||
|
||||
return buildout_script_paths
|
||||
|
||||
@@ -203,7 +208,7 @@ def _get_buildout_script_paths(search_path):
|
||||
|
||||
|
||||
def remove_python_path_suffix(path):
|
||||
for suffix in all_suffixes():
|
||||
for suffix in all_suffixes() + ['.pyi']:
|
||||
if path.endswith(suffix):
|
||||
path = path[:-len(suffix)]
|
||||
break
|
||||
@@ -250,7 +255,9 @@ def transform_path_to_dotted(sys_path, module_path):
|
||||
# is very strange and is probably a file that is called
|
||||
# `.py`.
|
||||
return
|
||||
yield tuple(split)
|
||||
# Stub folders for foo can end with foo-stubs. Just remove
|
||||
# it.
|
||||
yield tuple(re.sub(r'-stubs$', '', s) for s in split)
|
||||
|
||||
potential_solutions = tuple(iter_potential_solutions())
|
||||
if not potential_solutions:
|
||||
|
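The two path normalisations added above amount to the following (a sketch; the real suffix list would come from `importlib.machinery.all_suffixes()`, hard-coded here):
```
import re

def remove_suffix(path, suffixes=('.py', '.pyi', '.so')):
    for suffix in suffixes:
        if path.endswith(suffix):
            return path[:-len(suffix)]
    return path

print(remove_suffix('pkg/mod.pyi'))              # pkg/mod
print(re.sub(r'-stubs$', '', 'requests-stubs'))  # requests
```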
||||
@@ -107,19 +107,9 @@ class PushBackIterator(object):
|
||||
def ignored(*exceptions):
|
||||
"""
|
||||
Value manager that ignores all of the specified exceptions. This will
|
||||
be in the standard library starting with Python 3.4.
|
||||
be in the standard library starting with Python 3.5.
|
||||
"""
|
||||
try:
|
||||
yield
|
||||
except exceptions:
|
||||
pass
|
||||
|
||||
|
||||
def indent_block(text, indention=' '):
|
||||
"""This function indents a text block with a default of four spaces."""
|
||||
temp = ''
|
||||
while text and text[-1] == '\n':
|
||||
temp += text[-1]
|
||||
text = text[:-1]
|
||||
lines = text.split('\n')
|
||||
return '\n'.join(map(lambda s: indention + s, lines)) + temp
|
||||
|
||||
@@ -8,7 +8,7 @@ from jedi.inference.base_value import ValueWrapper
|
||||
|
||||
class Decoratee(ValueWrapper):
|
||||
def __init__(self, wrapped_value, original_value):
|
||||
self._wrapped_value = wrapped_value
|
||||
super(Decoratee, self).__init__(wrapped_value)
|
||||
self._original_value = original_value
|
||||
|
||||
def py__doc__(self):
|
||||
|
||||
@@ -80,9 +80,39 @@ class FunctionMixin(object):
|
||||
return LambdaName(self)
|
||||
return ValueName(self, self.tree_node.name)
|
||||
|
||||
def is_function(self):
|
||||
return True
|
||||
|
||||
def py__name__(self):
|
||||
return self.name.string_name
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
return_annotation = self.tree_node.annotation
|
||||
if return_annotation is None:
|
||||
def param_name_to_str(n):
|
||||
s = n.string_name
|
||||
annotation = n.infer().get_type_hint()
|
||||
if annotation is not None:
|
||||
s += ': ' + annotation
|
||||
if n.default_node is not None:
|
||||
s += '=' + n.default_node.get_code(include_prefix=False)
|
||||
return s
|
||||
|
||||
function_execution = self.as_context()
|
||||
result = function_execution.infer()
|
||||
return_hint = result.get_type_hint()
|
||||
body = self.py__name__() + '(%s)' % ', '.join([
|
||||
param_name_to_str(n)
|
||||
for n in function_execution.get_param_names()
|
||||
])
|
||||
if return_hint is None:
|
||||
return body
|
||||
else:
|
||||
return_hint = return_annotation.get_code(include_prefix=False)
|
||||
body = self.py__name__() + self.tree_node.children[2].get_code(include_prefix=False)
|
||||
|
||||
return body + ' -> ' + return_hint
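The string assembled by `get_type_hint` above can be mimicked with plain data (a sketch; `format_hint` and the parameter triples are hypothetical stand-ins for Jedi's param names):
```
def format_hint(func_name, params, return_hint=None):
    def one(name, annotation=None, default=None):
        s = name
        if annotation is not None:
            s += ': ' + annotation
        if default is not None:
            s += '=' + default
        return s

    body = func_name + '(%s)' % ', '.join(one(*p) for p in params)
    return body if return_hint is None else body + ' -> ' + return_hint

print(format_hint('foo', [('a', 'int', None), ('b', 'str', "'x'")], 'bool'))
# foo(a: int, b: str='x') -> bool
```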
|
||||
|
||||
def py__call__(self, arguments):
|
||||
function_execution = self.as_context(arguments)
|
||||
return function_execution.infer()
|
||||
@@ -172,9 +202,6 @@ class MethodValue(FunctionValue):
|
||||
|
||||
|
||||
class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
||||
def is_function_execution(self):
|
||||
return True
|
||||
|
||||
def _infer_annotations(self):
|
||||
raise NotImplementedError
|
||||
|
||||
@@ -198,15 +225,15 @@ class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
||||
returns = funcdef.iter_return_stmts()
|
||||
|
||||
for r in returns:
|
||||
check = flow_analysis.reachability_check(self, funcdef, r)
|
||||
if check is flow_analysis.UNREACHABLE:
|
||||
debug.dbg('Return unreachable: %s', r)
|
||||
if check_yields:
|
||||
value_set |= ValueSet.from_sets(
|
||||
lazy_value.infer()
|
||||
for lazy_value in self._get_yield_lazy_value(r)
|
||||
)
|
||||
else:
|
||||
if check_yields:
|
||||
value_set |= ValueSet.from_sets(
|
||||
lazy_value.infer()
|
||||
for lazy_value in self._get_yield_lazy_value(r)
|
||||
)
|
||||
check = flow_analysis.reachability_check(self, funcdef, r)
|
||||
if check is flow_analysis.UNREACHABLE:
|
||||
debug.dbg('Return unreachable: %s', r)
|
||||
else:
|
||||
try:
|
||||
children = r.children
|
||||
@@ -215,9 +242,9 @@ class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
||||
value_set |= ValueSet([ctx])
|
||||
else:
|
||||
value_set |= self.infer_node(children[1])
|
||||
if check is flow_analysis.REACHABLE:
|
||||
debug.dbg('Return reachable: %s', r)
|
||||
break
|
||||
if check is flow_analysis.REACHABLE:
|
||||
debug.dbg('Return reachable: %s', r)
|
||||
break
|
||||
return value_set
|
||||
|
||||
def _get_yield_lazy_value(self, yield_expr):
|
||||
@@ -262,7 +289,7 @@ class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
|
||||
else:
|
||||
types = self.get_return_values(check_yields=True)
|
||||
if types:
|
||||
yield LazyKnownValues(types)
|
||||
yield LazyKnownValues(types, min=0, max=float('inf'))
|
||||
return
|
||||
last_for_stmt = for_stmt
|
||||
|
||||
@@ -396,6 +423,9 @@ class OverloadedFunctionValue(FunctionMixin, ValueWrapper):
|
||||
def get_signature_functions(self):
|
||||
return self._overloaded_functions
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
return 'Union[%s]' % ', '.join(f.get_type_hint() for f in self._overloaded_functions)
|
||||
|
||||
|
||||
def _find_overload_functions(context, tree_node):
|
||||
def _is_overload_decorated(funcdef):
|
||||
|
||||
@@ -5,7 +5,7 @@ from parso.python.tree import search_ancestor
|
||||
from jedi import debug
|
||||
from jedi import settings
|
||||
from jedi.inference import compiled
|
||||
from jedi.inference.compiled.value import CompiledObjectFilter
|
||||
from jedi.inference.compiled.value import CompiledValueFilter
|
||||
from jedi.inference.helpers import values_from_qualified_names, is_big_annoying_library
|
||||
from jedi.inference.filters import AbstractFilter, AnonymousFunctionExecutionFilter
|
||||
from jedi.inference.names import ValueName, TreeNameDefinition, ParamName, \
|
||||
@@ -130,6 +130,9 @@ class AbstractInstanceValue(Value):
|
||||
for name in names
|
||||
)
|
||||
|
||||
def get_type_hint(self, add_class_info=True):
|
||||
return self.py__name__()
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s of %s>" % (self.__class__.__name__, self.class_value)
|
||||
|
||||
@@ -189,20 +192,12 @@ class _BaseTreeInstance(AbstractInstanceValue):
|
||||
for f in class_filters:
|
||||
if isinstance(f, ClassFilter):
|
||||
yield InstanceClassFilter(self, f)
|
||||
elif isinstance(f, CompiledObjectFilter):
|
||||
elif isinstance(f, CompiledValueFilter):
|
||||
yield CompiledInstanceClassFilter(self, f)
|
||||
else:
|
||||
# Probably from the metaclass.
|
||||
yield f
|
||||
|
||||
def _get_annotation_init_functions(self):
|
||||
filter = next(self.class_value.get_filters())
|
||||
for init_name in filter.get('__init__'):
|
||||
for init in init_name.infer():
|
||||
if init.is_function():
|
||||
for signature in init.get_signatures():
|
||||
yield signature.value
|
||||
|
||||
@inference_state_method_cache()
|
||||
def create_instance_context(self, class_context, node):
|
||||
new = node
|
||||
@@ -246,7 +241,7 @@ class _BaseTreeInstance(AbstractInstanceValue):
|
||||
def py__getitem__(self, index_value_set, contextualized_node):
|
||||
names = self.get_function_slot_names(u'__getitem__')
|
||||
if not names:
|
||||
return super(AbstractInstanceValue, self).py__getitem__(
|
||||
return super(_BaseTreeInstance, self).py__getitem__(
|
||||
index_value_set,
|
||||
contextualized_node,
|
||||
)
|
||||
@@ -257,7 +252,7 @@ class _BaseTreeInstance(AbstractInstanceValue):
|
||||
def py__iter__(self, contextualized_node=None):
|
||||
iter_slot_names = self.get_function_slot_names(u'__iter__')
|
||||
if not iter_slot_names:
|
||||
return super(AbstractInstanceValue, self).py__iter__(contextualized_node)
|
||||
return super(_BaseTreeInstance, self).py__iter__(contextualized_node)
|
||||
|
||||
def iterate():
|
||||
for generator in self.execute_function_slots(iter_slot_names):
|
||||
@@ -283,7 +278,7 @@ class _BaseTreeInstance(AbstractInstanceValue):
|
||||
names = self.get_function_slot_names(u'__call__')
|
||||
if not names:
|
||||
# Means the Instance is not callable.
|
||||
return super(AbstractInstanceValue, self).py__call__(arguments)
|
||||
return super(_BaseTreeInstance, self).py__call__(arguments)
|
||||
|
||||
return ValueSet.from_sets(name.infer().execute(arguments) for name in names)
|
||||
|
||||
@@ -322,8 +317,7 @@ class TreeInstance(_BaseTreeInstance):
if settings.dynamic_array_additions:
arguments = get_dynamic_array_instance(self, arguments)

super(_BaseTreeInstance, self).__init__(inference_state, parent_context,
class_value)
super(TreeInstance, self).__init__(inference_state, parent_context, class_value)
self._arguments = arguments
self.tree_node = class_value.tree_node

@@ -338,13 +332,14 @@ class TreeInstance(_BaseTreeInstance):
for signature in self.class_value.py__getattribute__('__init__').get_signatures():
# Just take the first result, it should always be one, because we
# control the typeshed code.
if not signature.matches_signature(args) \
or signature.value.tree_node is None:
funcdef = signature.value.tree_node
if funcdef is None or funcdef.type != 'funcdef' \
or not signature.matches_signature(args):
# First check if the signature even matches, if not we don't
# need to infer anything.
continue
bound_method = BoundMethod(self, self.class_value.as_context(), signature.value)
all_annotations = py__annotations__(signature.value.tree_node)
all_annotations = py__annotations__(funcdef)
type_var_dict = infer_type_vars_for_execution(bound_method, args, all_annotations)
if type_var_dict:
defined, = self.class_value.define_generics(
@@ -440,7 +435,7 @@ class CompiledInstanceClassFilter(AbstractFilter):
return self._convert(self._class_filter.values())

def _convert(self, names):
klass = self._class_filter.compiled_object
klass = self._class_filter.compiled_value
return [
CompiledInstanceName(self._instance.inference_state, self._instance, klass, n)
for n in names
@@ -552,10 +547,10 @@ class InstanceClassFilter(AbstractFilter):
self._class_filter = class_filter

def get(self, name):
return self._convert(self._class_filter.get(name, from_instance=True))
return self._convert(self._class_filter.get(name))

def values(self):
return self._convert(self._class_filter.values(from_instance=True))
return self._convert(self._class_filter.values())

def _convert(self, names):
return [
@@ -591,7 +586,7 @@ class SelfAttributeFilter(ClassFilter):
if trailer.type == 'trailer' \
and len(trailer.parent.children) == 2 \
and trailer.children[0] == '.':
if name.is_definition() and self._access_possible(name, from_instance=True):
if name.is_definition() and self._access_possible(name):
# TODO filter non-self assignments instead of this bad
# filter.
if self._is_in_right_scope(trailer.parent.children[0], name):

@@ -140,7 +140,6 @@ class ComprehensionMixin(object):
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.infer_node(input_node)
# TODO: simulate await if self.is_async

cn = ContextualizedNode(parent_context, input_node)
iterated = input_types.iterate(cn, is_async=is_async)
@@ -329,6 +328,11 @@ class SequenceLiteralValue(Sequence):
self.array_type = SequenceLiteralValue.mapping[atom.children[0]]
"""The builtin name of the array (list, set, tuple or dict)."""

def _get_generics(self):
if self.array_type == u'tuple':
return tuple(x.infer().py__class__() for x in self.py__iter__())
return super(SequenceLiteralValue, self)._get_generics()

def py__simple_getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
if isinstance(index, slice):
@@ -426,10 +430,10 @@ class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):

def py__simple_getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
compiled_obj_index = compiled.create_simple_object(self.inference_state, index)
compiled_value_index = compiled.create_simple_object(self.inference_state, index)
for key, value in self.get_tree_entries():
for k in self._defining_context.infer_node(key):
for key_v in k.execute_operation(compiled_obj_index, u'=='):
for key_v in k.execute_operation(compiled_value_index, u'=='):
if key_v.get_safe_value():
return self._defining_context.infer_node(value)
raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)
@@ -578,33 +582,31 @@ class MergedArray(Sequence):
return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())


def unpack_tuple_to_dict(value, types, exprlist):
def unpack_tuple_to_dict(context, types, exprlist):
"""
Unpacking tuple assignments in for statements and expr_stmts.
"""
if exprlist.type == 'name':
return {exprlist.value: types}
elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):
return unpack_tuple_to_dict(value, types, exprlist.children[1])
return unpack_tuple_to_dict(context, types, exprlist.children[1])
elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
'testlist_star_expr'):
dct = {}
parts = iter(exprlist.children[::2])
n = 0
for lazy_value in types.iterate(exprlist):
for lazy_value in types.iterate(ContextualizedNode(context, exprlist)):
n += 1
try:
part = next(parts)
except StopIteration:
# TODO this value is probably not right.
analysis.add(value, 'value-error-too-many-values', part,
analysis.add(context, 'value-error-too-many-values', part,
message="ValueError: too many values to unpack (expected %s)" % n)
else:
dct.update(unpack_tuple_to_dict(value, lazy_value.infer(), part))
dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part))
has_parts = next(parts, None)
if types and has_parts is not None:
# TODO this value is probably not right.
analysis.add(value, 'value-error-too-few-values', has_parts,
analysis.add(context, 'value-error-too-few-values', has_parts,
message="ValueError: need more than %s values to unpack" % n)
return dct
elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
@@ -634,7 +636,7 @@ class Slice(LazyValueWrapper):

def get_safe_value(self, default=sentinel):
"""
Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
Imitate CompiledValue.obj behavior and return a ``builtin.slice()``
object.
"""
def get(element):

@@ -38,11 +38,11 @@ py__doc__() Returns the docstring for a value.
"""
from jedi import debug
from jedi._compatibility import use_metaclass
from jedi.parser_utils import get_cached_parent_scope
from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted
from jedi.inference.cache import inference_state_method_cache, CachedMetaClass, \
inference_state_method_generator_cache
from jedi.inference import compiled
from jedi.inference.lazy_value import LazyKnownValues
from jedi.inference.lazy_value import LazyKnownValues, LazyTreeValue
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.names import TreeNameDefinition, ValueName
from jedi.inference.arguments import unpack_arglist, ValuesArguments
@@ -104,27 +104,31 @@ class ClassFilter(ParserTreeFilter):
node = get_cached_parent_scope(self._used_names, node)
return False

def _access_possible(self, name, from_instance=False):
def _access_possible(self, name):
# Filter for ClassVar variables
# TODO this is not properly done, yet. It just checks for the string
# ClassVar in the annotation, which can be quite imprecise. If we
# wanted to do this correct, we would have to infer the ClassVar.
if not from_instance:
if not self._is_instance:
expr_stmt = name.get_definition()
if expr_stmt is not None and expr_stmt.type == 'expr_stmt':
annassign = expr_stmt.children[1]
if annassign.type == 'annassign':
# TODO this is not proper matching
if 'ClassVar' not in annassign.children[1].get_code():

# If there is an =, the variable is obviously also
# defined on the class.
if 'ClassVar' not in annassign.children[1].get_code() \
and '=' not in annassign.children:
return False

# Filter for name mangling of private variables like __foo
return not name.value.startswith('__') or name.value.endswith('__') \
or self._equals_origin_scope()

def _filter(self, names, from_instance=False):
def _filter(self, names):
names = super(ClassFilter, self)._filter(names)
return [name for name in names if self._access_possible(name, from_instance)]
return [name for name in names if self._access_possible(name)]


class ClassMixin(object):
@@ -133,6 +137,10 @@ class ClassMixin(object):

def py__call__(self, arguments=None):
from jedi.inference.value import TreeInstance

from jedi.inference.gradual.typing import TypedDict
if self.is_typeddict():
return ValueSet([TypedDict(self)])
return ValueSet([TreeInstance(self.inference_state, self.parent_context, self, arguments)])

def py__class__(self):
@@ -145,12 +153,6 @@ class ClassMixin(object):
def py__name__(self):
return self.name.string_name

def get_param_names(self):
for value_ in self.py__getattribute__(u'__init__'):
if value_.is_function():
return list(value_.get_param_names())[1:]
return []

@inference_state_method_generator_cache()
def py__mro__(self):
mro = [self]
@@ -191,7 +193,7 @@ class ClassMixin(object):
yield f

for cls in self.py__mro__():
if isinstance(cls, compiled.CompiledObject):
if cls.is_compiled():
for filter in cls.get_filters(is_instance=is_instance):
yield filter
else:
@@ -212,9 +214,11 @@ class ClassMixin(object):
for instance in type_.py__call__(args):
instance_filters = instance.get_filters()
# Filter out self filters
next(instance_filters)
next(instance_filters)
yield next(instance_filters)
next(instance_filters, None)
next(instance_filters, None)
x = next(instance_filters, None)
assert x is not None
yield x

def get_signatures(self):
# Since calling staticmethod without a function is illegal, the Jedi
@@ -227,6 +231,41 @@ class ClassMixin(object):
def _as_context(self):
return ClassContext(self)

def get_type_hint(self, add_class_info=True):
if add_class_info:
return 'Type[%s]' % self.py__name__()
return self.py__name__()

@inference_state_method_cache(default=False)
def is_typeddict(self):
# TODO Do a proper mro resolution. Currently we are just listing
# classes. However, it's a complicated algorithm.
from jedi.inference.gradual.typing import TypedDictBase
for lazy_cls in self.py__bases__():
if not isinstance(lazy_cls, LazyTreeValue):
return False
tree_node = lazy_cls.data
# Only resolve simple classes, stuff like Iterable[str] are more
# intensive to resolve and if generics are involved, we know it's
# not a TypedDict.
if not expr_is_dotted(tree_node):
return False

for cls in lazy_cls.infer():
if isinstance(cls, TypedDictBase):
return True
try:
method = cls.is_typeddict
except AttributeError:
# We're only dealing with simple classes, so just returning
# here should be fine. This only happens with e.g. compiled
# classes.
return False
else:
if method():
return True
return False


class ClassValue(use_metaclass(CachedMetaClass, ClassMixin, FunctionAndClassBase)):
api_type = u'class'

@@ -1,7 +1,5 @@
import re
import os

from jedi import debug
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.names import AbstractNameDefinition, ModuleName
from jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
@@ -37,32 +35,6 @@ class _ModuleAttributeName(AbstractNameDefinition):
return compiled.get_string_value_set(self.parent_context.inference_state)


def iter_module_names(inference_state, paths):
# Python modules/packages
for n in inference_state.compiled_subprocess.list_module_names(paths):
yield n

for path in paths:
try:
dirs = os.listdir(path)
except OSError:
# The file might not exist or reading it might lead to an error.
debug.warning("Not possible to list directory: %s", path)
continue
for name in dirs:
# Namespaces
if os.path.isdir(os.path.join(path, name)):
# pycache is obviously not an interestin namespace. Also the
# name must be a valid identifier.
# TODO use str.isidentifier, once Python 2 is removed
if name != '__pycache__' and not re.search(r'\W|^\d', name):
yield name
# Stub files
if name.endswith('.pyi'):
if name != '__init__.pyi':
yield name[:-4]


class SubModuleDictMixin(object):
@inference_state_method_cache()
def sub_modules_dict(self):
@@ -72,7 +44,9 @@ class SubModuleDictMixin(object):
"""
names = {}
if self.is_package():
mods = iter_module_names(self.inference_state, self.py__path__())
mods = self.inference_state.compiled_subprocess.iter_module_names(
self.py__path__()
)
for name in mods:
# It's obviously a relative import to the current module.
names[name] = SubModuleName(self.as_context(), name)
@@ -111,20 +85,7 @@ class ModuleMixin(SubModuleDictMixin):
@property
@inference_state_method_cache()
def name(self):
return self._module_name_class(self, self._string_name)

@property
def _string_name(self):
""" This is used for the goto functions. """
# TODO It's ugly that we even use this, the name is usually well known
# ahead so just pass it when create a ModuleValue.
if self._path is None:
return '' # no path -> empty name
else:
sep = (re.escape(os.path.sep),) * 2
r = re.search(r'([^%s]*?)(%s__init__)?(\.pyi?|\.so)?$' % sep, self._path)
# Remove PEP 3149 names
return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
return self._module_name_class(self, self.string_names[-1])

@inference_state_method_cache()
def _module_attributes_dict(self):
@@ -138,7 +99,9 @@ class ModuleMixin(SubModuleDictMixin):

def iter_star_filters(self):
for star_module in self.star_imports():
yield next(star_module.get_filters())
f = next(star_module.get_filters(), None)
assert f is not None
yield f

# I'm not sure if the star import cache is really that effective anymore
# with all the other really fast import caches. Recheck. Also we would need
@@ -260,7 +223,7 @@ class ModuleValue(ModuleMixin, TreeValue):

def __repr__(self):
return "<%s: %s@%s-%s is_stub=%s>" % (
self.__class__.__name__, self._string_name,
self.__class__.__name__, self.py__name__(),
self.tree_node.start_pos[0], self.tree_node.end_pos[0],
self.is_stub()
)

@@ -35,6 +35,9 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
def get_filters(self, origin_scope=None):
yield DictFilter(self.sub_modules_dict())

def get_qualified_names(self):
return ()

@property
@inference_state_method_cache()
def name(self):

@@ -267,7 +267,6 @@ def get_parent_scope(node, include_flows=False):
continue
return scope
scope = scope.parent
return scope


get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope)
@@ -293,6 +292,25 @@ def cut_value_at_position(leaf, position):
return ''.join(lines)


def expr_is_dotted(node):
"""
Checks if a path looks like `name` or `name.foo.bar` and not `name()`.
"""
if node.type == 'atom':
if len(node.children) == 3 and node.children[0] == '(':
return expr_is_dotted(node.children[1])
return False
if node.type == 'atom_expr':
children = node.children
if children[0] == 'await':
return False
if not expr_is_dotted(children[0]):
return False
# Check trailers
return all(c.children[0] == '.' for c in children[1:])
return node.type == 'name'


def _function_is_x_method(method_name):
def wrapper(function_node):
"""

@@ -44,8 +44,6 @@ from operator import itemgetter as _itemgetter
from collections import OrderedDict

class {typename}(tuple):
'{typename}({arg_list})'

__slots__ = ()

_fields = {field_names!r}
@@ -160,7 +158,6 @@ def argument_clinic(string, want_value=False, want_context=False,
callback = kwargs.pop('callback')
assert not kwargs # Python 2...
debug.dbg('builtin start %s' % value, color='MAGENTA')
result = NO_VALUES
if want_context:
kwargs['context'] = arguments.context
if want_value:
@@ -473,13 +470,12 @@ def collections_namedtuple(value, arguments, callback):
return ValueSet([ClassValue(inference_state, parent_context, generated_class)])


class PartialObject(object):
def __init__(self, actual_value, arguments):
class PartialObject(ValueWrapper):
def __init__(self, actual_value, arguments, instance=None):
super(PartialObject, self).__init__(actual_value)
self._actual_value = actual_value
self._arguments = arguments

def __getattr__(self, name):
return getattr(self._actual_value, name)
self._instance = instance

def _get_function(self, unpacked_arguments):
key, lazy_value = next(unpacked_arguments, (None, None))
@@ -495,6 +491,8 @@ class PartialObject(object):
return []

arg_count = 0
if self._instance is not None:
arg_count = 1
keys = set()
for key, _ in unpacked_arguments:
if key is None:
@@ -509,9 +507,17 @@ class PartialObject(object):
return NO_VALUES

return func.execute(
MergedPartialArguments(self._arguments, arguments)
MergedPartialArguments(self._arguments, arguments, self._instance)
)

def py__get__(self, instance, class_value):
return ValueSet([self])


class PartialMethodObject(PartialObject):
def py__get__(self, instance, class_value):
return ValueSet([PartialObject(self._actual_value, self._arguments, instance)])


class PartialSignature(SignatureWrapper):
def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set):
@@ -525,15 +531,18 @@ class PartialSignature(SignatureWrapper):


class MergedPartialArguments(AbstractArguments):
def __init__(self, partial_arguments, call_arguments):
def __init__(self, partial_arguments, call_arguments, instance=None):
self._partial_arguments = partial_arguments
self._call_arguments = call_arguments
self._instance = instance

def unpack(self, funcdef=None):
unpacked = self._partial_arguments.unpack(funcdef)
# Ignore this one, it's the function. It was checked before that it's
# there.
next(unpacked)
next(unpacked, None)
if self._instance is not None:
yield None, LazyKnownValue(self._instance)
for key_lazy_value in unpacked:
yield key_lazy_value
for key_lazy_value in self._call_arguments.unpack(funcdef):
@@ -547,6 +556,13 @@ def functools_partial(value, arguments, callback):
)


def functools_partialmethod(value, arguments, callback):
return ValueSet(
PartialMethodObject(instance, arguments)
for instance in value.py__call__(arguments)
)


@argument_clinic('first, /')
def _return_first_param(firsts):
return firsts
@@ -744,6 +760,7 @@ _implemented = {
},
'functools': {
'partial': functools_partial,
'partialmethod': functools_partialmethod,
'wraps': _functools_wraps,
},
'_weakref': {

@@ -1,203 +0,0 @@
"""
THIS is not in active development, please check
https://github.com/davidhalter/jedi/issues/667 first before editing.

Introduce some basic refactoring functions to |jedi|. This module is still in a
very early development stage and needs much testing and improvement.

.. warning:: I won't do too much here, but if anyone wants to step in, please
do. Refactoring is none of my priorities

It uses the |jedi| `API <api.html>`_ and supports currently the
following functions (sometimes bug-prone):

- rename
- extract variable
- inline variable
"""
import difflib

from parso import python_bytes_to_unicode, split_lines
from jedi.inference import helpers


class Refactoring(object):
def __init__(self, change_dct):
"""
:param change_dct: dict(old_path=(new_path, old_lines, new_lines))
"""
self.change_dct = change_dct

def old_files(self):
dct = {}
for old_path, (new_path, old_l, new_l) in self.change_dct.items():
dct[old_path] = '\n'.join(old_l)
return dct

def new_files(self):
dct = {}
for old_path, (new_path, old_l, new_l) in self.change_dct.items():
dct[new_path] = '\n'.join(new_l)
return dct

def diff(self):
texts = []
for old_path, (new_path, old_l, new_l) in self.change_dct.items():
if old_path:
udiff = difflib.unified_diff(old_l, new_l)
else:
udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
texts.append('\n'.join(udiff))
return '\n'.join(texts)


def rename(script, new_name):
""" The `args` / `kwargs` params are the same as in `api.Script`.
:param new_name: The new name of the script.
:param script: The source Script object.
:return: list of changed lines/changed files
"""
return Refactoring(_rename(script.get_references(), new_name))


def _rename(names, replace_str):
""" For both rename and inline. """
order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
reverse=True)

def process(path, old_lines, new_lines):
if new_lines is not None: # goto next file, save last
dct[path] = path, old_lines, new_lines

dct = {}
current_path = object()
new_lines = old_lines = None
for name in order:
if name.in_builtin_module():
continue
if current_path != name.module_path:
current_path = name.module_path

process(current_path, old_lines, new_lines)
if current_path is not None:
# None means take the source that is a normal param.
with open(current_path) as f:
source = f.read()

new_lines = split_lines(python_bytes_to_unicode(source))
old_lines = new_lines[:]

nr, indent = name.line, name.column
line = new_lines[nr - 1]
new_lines[nr - 1] = line[:indent] + replace_str + \
line[indent + len(name.name):]
process(current_path, old_lines, new_lines)
return dct


def extract(script, new_name):
""" The `args` / `kwargs` params are the same as in `api.Script`.
:param operation: The refactoring operation to execute.
:type operation: str
:type source: str
:return: list of changed lines/changed files
"""
new_lines = split_lines(python_bytes_to_unicode(script.source))
old_lines = new_lines[:]

user_stmt = script._parser.user_stmt()

# TODO care for multi-line extracts
dct = {}
if user_stmt:
pos = script._pos
line_index = pos[0] - 1
# Be careful here. 'array_for_pos' does not exist in 'helpers'.
arr, index = helpers.array_for_pos(user_stmt, pos)
if arr is not None:
start_pos = arr[index].start_pos
end_pos = arr[index].end_pos

# take full line if the start line is different from end line
e = end_pos[1] if end_pos[0] == start_pos[0] else None
start_line = new_lines[start_pos[0] - 1]
text = start_line[start_pos[1]:e]
for l in range(start_pos[0], end_pos[0] - 1):
text += '\n' + str(l)
if e is None:
end_line = new_lines[end_pos[0] - 1]
text += '\n' + end_line[:end_pos[1]]

# remove code from new lines
t = text.lstrip()
del_start = start_pos[1] + len(text) - len(t)

text = t.rstrip()
del_end = len(t) - len(text)
if e is None:
new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:]
e = len(start_line)
else:
e = e - del_end
start_line = start_line[:del_start] + new_name + start_line[e:]
new_lines[start_pos[0] - 1] = start_line
new_lines[start_pos[0]:end_pos[0] - 1] = []

# add parentheses in multi-line case
open_brackets = ['(', '[', '{']
close_brackets = [')', ']', '}']
if '\n' in text and not (text[0] in open_brackets and text[-1]
== close_brackets[open_brackets.index(text[0])]):
text = '(%s)' % text

# add new line before statement
indent = user_stmt.start_pos[1]
new = "%s%s = %s" % (' ' * indent, new_name, text)
new_lines.insert(line_index, new)
dct[script.path] = script.path, old_lines, new_lines
return Refactoring(dct)


def inline(script):
"""
:type script: api.Script
"""
new_lines = split_lines(python_bytes_to_unicode(script.source))

dct = {}

definitions = script.goto()
assert len(definitions) == 1
stmt = definitions[0]._definition
references = script.get_references()
inlines = [r for r in references
if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos]
inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column),
reverse=True)
expression_list = stmt.expression_list()
# don't allow multi-line refactorings for now.
assert stmt.start_pos[0] == stmt.end_pos[0]
index = stmt.start_pos[0] - 1

line = new_lines[index]
replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1]
replace_str = replace_str.strip()
# tuples need parentheses
if expression_list and expression_list[0].type == 'TODO':
arr = expression_list[0]
if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1:
replace_str = '(%s)' % replace_str

# if it's the only assignment, remove the statement
if len(stmt.get_defined_names()) == 1:
line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:]

dct = _rename(inlines, replace_str)
# remove the empty line
new_lines = dct[script.path][2]
if line.strip():
new_lines[index] = line
else:
new_lines.pop(index)

return Refactoring(dct)
@@ -17,14 +17,12 @@ Completion output

.. autodata:: case_insensitive_completion
.. autodata:: add_bracket_after_function
.. autodata:: no_completion_duplicates


Filesystem cache
~~~~~~~~~~~~~~~~

.. autodata:: cache_directory
.. autodata:: use_filesystem_cache


Parser
@@ -39,7 +37,6 @@ Dynamic stuff
.. autodata:: dynamic_array_additions
.. autodata:: dynamic_params
.. autodata:: dynamic_params_for_other_modules
.. autodata:: additional_dynamic_modules
.. autodata:: auto_import_modules


@@ -54,35 +51,23 @@ import os
import platform

# ----------------
# completion output settings
# Completion Output Settings
# ----------------

case_insensitive_completion = True
"""
The completion is by default case insensitive.
Completions are by default case insensitive.
"""

add_bracket_after_function = False
"""
Adds an opening bracket after a function, because that's normal behaviour.
Removed it again, because in VIM that is not very practical.
"""

no_completion_duplicates = True
"""
If set, completions with the same name don't appear in the output anymore,
but are in the `same_name_completions` attribute.
Adds an opening bracket after a function for completions.
"""

# ----------------
# Filesystem cache
# Filesystem Cache
# ----------------

use_filesystem_cache = True
"""
Use filesystem cache to save once parsed files with pickle.
"""

if platform.system().lower() == 'windows':
_cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi',
'Jedi')
@@ -97,31 +82,32 @@ The path where the cache is stored.

On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``.
On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
On Linux, if the environment variable ``$XDG_CACHE_HOME`` is set,
``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
"""

# ----------------
# parser
# Parser
# ----------------

fast_parser = True
"""
Use the fast parser. This means that reparsing is only being done if
something has been changed e.g. to a function. If this happens, only the
function is being reparsed.
Uses Parso's diff parser. If it is enabled, this might cause issues, please
read the warning on :class:`.Script`. This feature makes it possible to only
parse the parts again that have changed, while reusing the rest of the syntax
tree.
"""

_cropped_file_size = 10e6 # 1 Megabyte
"""
Jedi gets extremely slow if the file size exceed a few thousand lines.
To avoid getting stuck completely Jedi crops the file this point.
To avoid getting stuck completely Jedi crops the file at some point.

One megabyte of typical Python code equals about 20'000 lines of code.
"""

# ----------------
# dynamic stuff
# Dynamic Stuff
# ----------------

dynamic_array_additions = True
@@ -140,12 +126,6 @@ dynamic_params_for_other_modules = True
Do the same for other modules.
"""

additional_dynamic_modules = []
"""
Additional modules in which |jedi| checks if statements are to be found. This
is practical for IDEs, that want to administrate their modules themselves.
"""

dynamic_flow_information = True
"""
Check for `isinstance` and other information to infer a type.
@@ -155,13 +135,13 @@ auto_import_modules = [
'gi', # This third-party repository (GTK stuff) doesn't really work with jedi
]
"""
Modules that are not analyzed but imported, although they contain Python code.
Modules that will not be analyzed but imported, if they contain Python code.
This improves autocompletion for libraries that use ``setattr`` or
``globals()`` modifications a lot.
"""

# ----------------
# caching validity (time)
# Caching Validity
# ----------------

call_signatures_validity = 3.0

@@ -19,15 +19,14 @@ READLINE_DEBUG = False

def setup_readline(namespace_module=__main__, fuzzy=False):
"""
Install Jedi completer to :mod:`readline`.
This function sets up :mod:`readline` to use Jedi in a Python interactive
shell.

This function setups :mod:`readline` to use Jedi in Python interactive
shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
If you want to use a custom ``PYTHONSTARTUP`` file (typically
``$HOME/.pythonrc.py``), you can add this piece of code::

try:
from jedi.utils import setup_readline
setup_readline()
except ImportError:
# Fallback to the stdlib readline completer if it is installed.
# Taken from http://docs.python.org/2/library/rlcompleter.html
@@ -38,6 +37,8 @@ def setup_readline(namespace_module=__main__, fuzzy=False):
readline.parse_and_bind("tab: complete")
except ImportError:
print("Readline is not installed either. No tab completion is enabled.")
else:
setup_readline()

This will fallback to the readline completer if Jedi is not installed.
The readline completer will only complete names in the global namespace,
@@ -45,18 +46,18 @@ def setup_readline(namespace_module=__main__, fuzzy=False):

ran<TAB>

will complete to ``range``
will complete to ``range``.

with both Jedi and readline, but::
With Jedi the following code::

range(10).cou<TAB>

will show complete to ``range(10).count`` only with Jedi.
will complete to ``range(10).count``, this does not work with the default
cPython :mod:`readline` completer.

You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
You will also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
bash).

"""
if READLINE_DEBUG:
logging.basicConfig(

@@ -1 +1 @@
parso>=0.5.2
parso>=0.7.0

7
setup.py
@@ -33,16 +33,16 @@ setup(name='jedi',
keywords='python completion refactoring vim',
long_description=readme,
packages=find_packages(exclude=['test', 'test.*']),
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
install_requires=install_requires,
extras_require={
'testing': [
# Pytest 5 doesn't support Python 2 and Python 3.4 anymore.
# Pytest 5 doesn't support Python 2 anymore.
'pytest>=3.9.0,<5.0.0',
# docopt for sith doctests
'docopt',
# coloroma for colored debug output
'colorama==0.4.1', # Pinned so it works for Python 3.4
'colorama',
],
'qa': [
'flake8==3.7.9',
@@ -60,7 +60,6 @@ setup(name='jedi',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',

12
sith.py
@@ -20,8 +20,7 @@ Run a specific operation

./sith.py run <operation> </path/to/source/file.py> <line> <col>

Where operation is one of completions, goto_assignments, goto_definitions,
usages, or call_signatures.
Where operation is one of complete, goto, infer, get_references or get_signatures.

Note: Line numbers start at 1; columns start at 0 (this is consistent with
many text editors, including Emacs).
@@ -95,6 +94,7 @@ class TestCase(object):
args = json.load(f)
return cls(*args)

# Changing this? Also update the module docstring above.
operations = ['complete', 'goto', 'infer', 'get_references', 'get_signatures']

@classmethod
@@ -151,13 +151,13 @@ class TestCase(object):
# Three lines ought to be enough
lower = lineno - show if lineno - show > 0 else 0
prefix = ' |'
for i, line in enumerate(self.script._source.split('\n')[lower:lineno]):
for i, line in enumerate(self.script._code.split('\n')[lower:lineno]):
print(prefix, lower + i + 1, line)
print(prefix, ' ', ' ' * (column + len(str(lineno))), '^')
print(prefix, ' ' * (column + len(str(lineno))), '^')

def show_operation(self):
print("%s:\n" % self.operation.capitalize())
if self.operation == 'completions':
if self.operation == 'complete':
self.show_completions()
else:
self.show_definitions()
@@ -168,7 +168,7 @@ class TestCase(object):

def show_definitions(self):
for completion in self.objects:
print(completion.desc_with_module)
print(completion.full_name)
if completion.module_path is None:
continue
if os.path.abspath(completion.module_path) == os.path.abspath(self.path):

@@ -473,7 +473,7 @@ def test_func():
#? int()
tuple({1})[0]

# python >= 3.4
# python > 2.7
# -----------------
# PEP 3132 Extended Iterable Unpacking (star unpacking)
# -----------------

Some files were not shown because too many files have changed in this diff.