forked from VimPlug/jedi

merge of tokenizer/dev branch

David Halter
2013-04-20 19:23:15 +04:30
42 changed files with 1322 additions and 732 deletions

18
.coveragerc Normal file
View File

@@ -0,0 +1,18 @@
[run]
omit =
jedi/_compatibility.py
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
# Don't complain about missing debug-only code:
def __repr__
if self\.debug
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:

4
.gitignore vendored
View File

@@ -1,9 +1,11 @@
*~
*.swp
*.swo
*.pyc
.ropeproject
.tox
*.pyc
.coveralls.yml
.coverage
/build/
/docs/_build/
/dist/

View File

@@ -1,17 +1,19 @@
language: python
python:
- 2.5
- 2.6
- 2.7
- 3.2
env:
- TOXENV=py26
- TOXENV=py27
- TOXENV=py32
- TOXENV=py33
- TOXENV=cov
matrix:
allow_failures:
- env: TOXENV=cov
install:
- if [[ $TRAVIS_PYTHON_VERSION == '2.5' ]]; then
pip install --use-mirrors simplejson unittest2;
fi
- if [[ $TRAVIS_PYTHON_VERSION == '2.6' ]]; then
pip install --use-mirrors unittest2;
fi
- pip install --use-mirrors nose
- pip install --quiet --use-mirrors tox
script:
- cd test
- ./test.sh
- tox
after_script:
- if [ $TOXENV == "cov" ]; then
pip install --quiet --use-mirrors coveralls;
coveralls;
fi

View File

@@ -2,14 +2,13 @@ Main Authors
============
David Halter (@davidhalter)
Takafumi Arakaki (@tkf)
Code Contributors
=================
Danilo Bargen (@dbrgn)
tek (@tek)
Takafumi Arakaki (@tkf)
Yasha Borevich (@jjay)
Aaron Griffin
andviro (@andviro)

View File

@@ -6,6 +6,11 @@ Jedi - an awesome autocompletion library for Python
:target: http://travis-ci.org/davidhalter/jedi
:alt: Travis-CI build status
.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.png?branch=master
:target: https://coveralls.io/r/davidhalter/jedi
:alt: Coverage Status
Jedi is an autocompletion tool for Python that can be used in IDEs/editors.
Jedi works. Jedi is fast. It understands all of the basic Python syntax
elements including many builtin functions.
@@ -86,3 +91,25 @@ API for IDEs
It's very easy to create an editor plugin that uses Jedi. See
https://jedi.readthedocs.org/en/latest/docs/plugin-api.html for more
information.
Testing
=======
The test suite depends on ``tox`` and ``pytest``::
pip install tox pytest
To run the tests for all supported Python versions::
PIP_INSECURE=t tox
If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as ::
tox -e py27
The ``PIP_INSECURE=t`` env variable is only needed for the ``py25`` target.
Tests are also run automatically on `Travis CI
<https://travis-ci.org/davidhalter/jedi/>`_.
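
If you would rather start the suite from Python (e.g. from an editor
integration), a minimal sketch using pytest's public ``pytest.main`` entry
point could look like the following; the only assumption is that it is run
from the repository root so that ``pytest.ini`` is picked up::

    import sys
    import pytest

    # pytest.ini already adds --doctest-modules and the clean_jedi_cache
    # fixture; just point pytest at the test directory.
    sys.exit(pytest.main(['test']))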

34
conftest.py Normal file
View File

@@ -0,0 +1,34 @@
import tempfile
import shutil
import jedi
collect_ignore = ["setup.py"]
# The following hooks (pytest_configure, pytest_unconfigure) are used
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests use the
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in py.test.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/
jedi_cache_directory_orig = None
jedi_cache_directory_temp = None
def pytest_configure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi_cache_directory_orig = jedi.settings.cache_directory
jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
jedi.settings.cache_directory = jedi_cache_directory_temp
def pytest_unconfigure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi.settings.cache_directory = jedi_cache_directory_orig
shutil.rmtree(jedi_cache_directory_temp)
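# Editor's sketch (not part of this commit): the same isolation can be done
# per test with pytest's built-in `monkeypatch` and `tmpdir` fixtures, which
# is what the `isolated_jedi_cache` fixture in test/conftest.py below does.
import pytest

@pytest.fixture()
def isolated_cache_sketch(monkeypatch, tmpdir):
    # Point jedi's on-disk cache at a per-test temporary directory.
    monkeypatch.setattr(jedi.settings, 'cache_directory', str(tmpdir))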

View File

@@ -5,7 +5,7 @@ A little history
The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit
of the precognition the Jedi have. There's even an awesome `scene
<http://www.youtube.com/watch?v=5BDO3pyavOY>`_ of Monty Python Jedi's :-).
<http://www.youtube.com/watch?v=5BDO3pyavOY>`_ of Monty Python Jedis :-).
But actually the name hasn't so much to do with Star Wars. It's part of my
second name.
@@ -13,13 +13,13 @@ second name.
After I explained Guido van Rossum, how some parts of my auto-completion work,
he said (we drank a beer or two):
*Oh, that worries me*
*"Oh, that worries me..."*
When it's finished, I hope he'll like it :-)
I actually started Jedi, because there were no good solutions available for
VIM. Most auto-completions just didn't work well. The only good solution was
PyCharm. I just like my good old VIM. Rope was never really intended to be an
I actually started Jedi, because there were no good solutions available for VIM.
Most auto-completions just didn't work well. The only good solution was PyCharm.
But I like my good old VIM. Rope was never really intended to be an
auto-completion (and also I really hate project folders for my Python scripts).
It's more of a refactoring suite. So I decided to do my own version of a
completion, which would execute non-dangerous code. But I soon realized, that

View File

@@ -14,10 +14,10 @@ Blackbox Tests (run.py)
.. automodule:: test.run
Regression Tests (regression.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Regression Tests (test_regression.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: test.regression
.. automodule:: test.test_regression
Refactoring Tests (refactor.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@@ -7,11 +7,69 @@ Most of the code here is necessary to support Python 2.5. Once this dependency
will be dropped, we'll get rid of most code.
"""
import sys
import imp
import os
try:
import importlib
except:
pass
is_py3k = sys.hexversion >= 0x03000000
is_py33 = sys.hexversion >= 0x03030000
is_py25 = sys.hexversion < 0x02060000
def find_module_py33(string, path=None):
mod_info = (None, None, None)
loader = None
if path is not None:
# Check for the module in the specified path
loader = importlib.machinery.PathFinder.find_module(string, path)
else:
# Check for the module in sys.path
loader = importlib.machinery.PathFinder.find_module(string, sys.path)
if loader is None:
# Fallback to find builtins
loader = importlib.find_loader(string)
if loader is None:
raise ImportError
try:
if (loader.is_package(string)):
mod_info = (None, os.path.dirname(loader.path), True)
else:
filename = loader.get_filename(string)
if filename and os.path.exists(filename):
mod_info = (open(filename, 'U'), filename, False)
else:
mod_info = (None, filename, False)
except AttributeError:
mod_info = (None, loader.load_module(string).__name__, False)
return mod_info
def find_module_pre_py33(string, path=None):
mod_info = None
if path is None:
mod_info = imp.find_module(string)
else:
mod_info = imp.find_module(string, path)
return (mod_info[0], mod_info[1], mod_info[2][2] == imp.PKG_DIRECTORY)
def find_module(string, path=None):
"""Provides information about a module.
This function isolates the differences in importing libraries introduced with
Python 3.3 onwards; it gets a module name and optionally a path. It returns a
tuple containing an open file for the module (if not builtin), the filename
or the name of the module if it is a builtin one, and a boolean indicating
whether the module is contained in a package."""
if is_py33:
return find_module_py33(string, path)
else:
return find_module_pre_py33(string, path)
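# Editor's sketch (not part of this commit): how a caller might consume the
# (file, path, is_package) tuple documented above. The helper name is
# illustrative only.
def _example_read_module_source(name, path=None):
    module_file, module_path, is_pkg = find_module(name, path)
    if module_file is None:
        # Builtin module or package directory: only a path/name is available.
        return None
    try:
        return module_file.read()
    finally:
        module_file.close()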
# next was defined in python 2.6, in python 3 obj.next won't be possible
# anymore
try:
@@ -81,6 +139,25 @@ else:
eval(compile("""def exec_function(source, global_map):
exec source in global_map """, 'blub', 'exec'))
# re-raise function
if is_py3k:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
eval(compile("""
def reraise(exception, traceback):
raise exception, None, traceback
""", 'blub', 'exec'))
reraise.__doc__ = """
Re-raise `exception` with a `traceback` object.
Usage::
reraise(Exception, sys.exc_info()[2])
"""
# StringIO (Python 2.5 has no io module), so use io only for py3k
try:
from StringIO import StringIO

View File

@@ -7,29 +7,27 @@ catch :exc:`NotFoundError` which is being raised if your completion is not
possible.
"""
from __future__ import with_statement
__all__ = ['Script', 'NotFoundError', 'set_debug_function', '_quick_complete']
import re
import os
import warnings
import parsing
import parsing_representation as pr
from jedi import parsing
from jedi import parsing_representation as pr
from jedi import debug
from jedi import settings
from jedi import helpers
from jedi import common
from jedi import cache
from jedi import modules
from jedi._compatibility import next, unicode
import evaluate
import keywords
import api_classes
import evaluate_representation as er
import dynamic
import imports
import evaluate
import modules
import debug
import settings
import keywords
import helpers
import common
import builtin
import api_classes
import cache
from _compatibility import next, unicode
class NotFoundError(Exception):
@@ -76,6 +74,7 @@ class Script(object):
""" lazy parser."""
return self._module.parser
@api_classes._clear_caches_after_call
def complete(self):
"""
Return :class:`api_classes.Completion` objects. Those objects contain
@@ -209,6 +208,7 @@ class Script(object):
warnings.warn("Use line instead.", DeprecationWarning)
return self.definition()
@api_classes._clear_caches_after_call
def definition(self):
"""
Return the definitions of a the path under the cursor. This is not a
@@ -270,8 +270,9 @@ class Script(object):
d = set([api_classes.Definition(s) for s in scopes
if not isinstance(s, imports.ImportPath._GlobalNamespace)])
return sorted(d, key=lambda x: (x.module_path, x.start_pos))
return self._sorted_defs(d)
@api_classes._clear_caches_after_call
def goto(self):
"""
Return the first definition found by goto. Imports and statements
@@ -282,7 +283,7 @@ class Script(object):
:rtype: list of :class:`api_classes.Definition`
"""
d = [api_classes.Definition(d) for d in set(self._goto()[0])]
return sorted(d, key=lambda x: (x.module_path, x.start_pos))
return self._sorted_defs(d)
def _goto(self, add_import_name=False):
"""
@@ -334,6 +335,7 @@ class Script(object):
definitions = [user_stmt]
return definitions, search_name
@api_classes._clear_caches_after_call
def related_names(self, additional_module_paths=()):
"""
Return :class:`api_classes.RelatedName` objects, which contain all
@@ -367,7 +369,7 @@ class Script(object):
else:
names.append(api_classes.RelatedName(d.names[-1], d))
return sorted(set(names), key=lambda x: (x.module_path, x.start_pos))
return self._sorted_defs(set(names))
def get_in_function_call(self):
"""
@@ -378,6 +380,7 @@ class Script(object):
warnings.warn("Use line instead.", DeprecationWarning)
return self.function_definition()
@api_classes._clear_caches_after_call
def function_definition(self):
"""
Return the function object of the call you're currently in.
@@ -487,8 +490,11 @@ class Script(object):
match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path, flags=re.S)
return match.groups()
def __del__(self):
api_classes._clear_caches()
@staticmethod
def _sorted_defs(d):
# Note: `or ''` below is required because `module_path` could be
# None and you can't compare None and str in Python 3.
return sorted(d, key=lambda x: (x.module_path or '', x.start_pos))
def defined_names(source, source_path=None, source_encoding='utf-8'):
@@ -507,7 +513,7 @@ def defined_names(source, source_path=None, source_encoding='utf-8'):
modules.source_to_unicode(source, source_encoding),
module_path=source_path,
)
return api_classes.defined_names(parser.scope)
return api_classes._defined_names(parser.scope)
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,

View File

@@ -3,21 +3,24 @@ The :mod:`api_classes` module contains the return classes of the API. These
classes are the much bigger part of the whole API, because they contain the
interesting information about completion and goto operations.
"""
from __future__ import with_statement
import re
import os
import warnings
import functools
from _compatibility import unicode, next
import cache
import dynamic
from jedi._compatibility import unicode, next
from jedi import settings
from jedi import common
from jedi import parsing_representation as pr
from jedi import cache
import keywords
import recursion
import settings
import dynamic
import evaluate
import imports
import parsing_representation as pr
import evaluate_representation as er
import keywords
def _clear_caches():
@@ -34,6 +37,18 @@ def _clear_caches():
imports.imports_processed = 0
def _clear_caches_after_call(func):
"""
Clear caches just before returning a value.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
result = func(*args, **kwds)
_clear_caches()
return result
return wrapper
class BaseDefinition(object):
_mapping = {'posixpath': 'os.path',
'riscospath': 'os.path',
@@ -69,12 +84,60 @@ class BaseDefinition(object):
@property
def type(self):
"""The type of the definition."""
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. Since what is in ``variable`` is unambiguous
to Jedi, :meth:`api.Script.definition` should return a list of
definitions for ``sys``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import sys
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... variable = sys or f or C or x'''
>>> script = Script(source, len(source.splitlines()), 3, 'example.py')
>>> defs = script.definition()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs # doctest: +NORMALIZE_WHITESPACE
[<Definition module sys>, <Definition class C>,
<Definition class D>, <Definition def f>]
Finally, here is what you can get from :attr:`type`:
>>> defs[0].type
'module'
>>> defs[1].type
'class'
>>> defs[2].type
'instance'
>>> defs[3].type
'function'
"""
# generate the type
stripped = self.definition
if isinstance(self.definition, er.InstanceElement):
stripped = self.definition.var
return type(stripped).__name__
if isinstance(stripped, pr.Name):
stripped = stripped.parent
return type(stripped).__name__.lower()
@property
def path(self):
@@ -83,19 +146,27 @@ class BaseDefinition(object):
if not isinstance(self.definition, keywords.Keyword):
par = self.definition
while par is not None:
try:
with common.ignored(AttributeError):
path.insert(0, par.name)
except AttributeError:
pass
par = par.parent
return path
@property
def module_name(self):
"""The module name."""
"""
The module name.
>>> from jedi import Script
>>> source = 'import datetime'
>>> script = Script(source, 1, len(source), 'example.py')
>>> d = script.definition()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
datetime
"""
path = self.module_path
sep = os.path.sep
p = re.sub(r'^.*?([\w\d]+)(%s__init__)?.py$' % sep, r'\1', path)
p = re.sub(r'^.*?([\w\d]+)(%s__init__)?.(py|so)$' % sep, r'\1', path)
return p
def in_builtin_module(self):
@@ -125,7 +196,31 @@ class BaseDefinition(object):
@property
def doc(self):
"""Return a document string for this completion object."""
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, 1, len('def f'), 'example.py')
>>> d = script.definition()[0]
>>> print(d.doc)
f(a, b = 1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring. For a function, it is the call signature. If you need the
actual docstring, use :attr:`raw_doc` instead.
>>> print(d.raw_doc)
Document for function f.
"""
try:
return self.definition.doc
except AttributeError:
@@ -133,7 +228,11 @@ class BaseDefinition(object):
@property
def raw_doc(self):
"""The raw docstring ``__doc__`` for any object."""
"""
The raw docstring ``__doc__`` for any object.
See :attr:`doc` for example.
"""
try:
return unicode(self.definition.docstr)
except AttributeError:
@@ -141,21 +240,63 @@ class BaseDefinition(object):
@property
def description(self):
"""A textual description of the object."""
"""
A textual description of the object.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f or C'''
>>> script = Script(source, len(source.splitlines()), 3, 'example.py')
>>> defs = script.definition() # doctest: +SKIP
>>> defs = sorted(defs, key=lambda d: d.line) # doctest: +SKIP
>>> defs # doctest: +SKIP
[<Definition def f>, <Definition class C>]
>>> defs[0].description # doctest: +SKIP
'def f'
>>> defs[1].description # doctest: +SKIP
'class C'
"""
return unicode(self.definition)
@property
def full_name(self):
"""The path to a certain class/function, see #61."""
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up the Python manual for the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, 3, len('os.path.join'), 'example.py')
>>> print(script.definition()[0].full_name)
os.path.join
Notice that it correctly returns ``'os.path.join'`` instead of
(for example) ``'posixpath.join'``.
"""
path = [unicode(p) for p in self.path]
# TODO add further checks, the mapping should only occur on stdlib.
if not path:
return None # for keywords the path is empty
try:
with common.ignored(KeyError):
path[0] = self._mapping[path[0]]
except KeyError:
pass
for key, repl in self._tuple_mapping.items():
if tuple(path[:len(key)]) == key:
path = [repl] + path[len(key):]
@@ -250,7 +391,7 @@ class Completion(BaseDefinition):
"""
if self._followed_definitions is None:
if self.definition.isinstance(pr.Statement):
defs = er.follow_statement(self.definition)
defs = evaluate.follow_statement(self.definition)
elif self.definition.isinstance(pr.Import):
defs = imports.strip_imports([self.definition])
else:
@@ -365,10 +506,10 @@ class Definition(BaseDefinition):
d = d.var
if isinstance(d, pr.Name):
d = d.parent
return defined_names(d)
return _defined_names(d)
def defined_names(scope):
def _defined_names(scope):
"""
List sub-definitions (e.g., methods in class).

View File

@@ -22,7 +22,7 @@ possible to access functions like ``list`` and ``int`` directly, the same way
"""
from __future__ import with_statement
from _compatibility import exec_function, is_py3k
from jedi._compatibility import exec_function, is_py3k
import re
import sys
@@ -32,11 +32,11 @@ if is_py3k:
import types
import inspect
import common
import debug
import parsing
from jedi import common
from jedi import debug
from jedi import parsing
from jedi import modules
import evaluate
import modules
class BuiltinModule(modules.CachedModule):

View File

@@ -21,11 +21,17 @@ from __future__ import with_statement
import time
import os
import sys
import hashlib
try:
import cPickle as pickle
except:
import pickle
import shutil
from _compatibility import json
import settings
import debug
from jedi._compatibility import json
from jedi import settings
from jedi import common
from jedi import debug
# memoize caches will be deleted after every action
memoize_caches = []
@@ -143,12 +149,10 @@ def cache_function_definition(stmt):
def cache_star_import(func):
def wrapper(scope, *args, **kwargs):
try:
with common.ignored(KeyError):
mods = star_import_cache[scope]
if mods[0] + settings.star_import_cache_validity > time.time():
return mods[1]
except KeyError:
pass
# cache is too old and therefore invalid or not available
invalidate_star_import_cache(scope)
mods = func(scope, *args, **kwargs)
@@ -160,15 +164,13 @@ def cache_star_import(func):
def invalidate_star_import_cache(module, only_main=False):
""" Important if some new modules are being reparsed """
try:
with common.ignored(KeyError):
t, mods = star_import_cache[module]
del star_import_cache[module]
for m in mods:
invalidate_star_import_cache(m, only_main=True)
except KeyError:
pass
if not only_main:
# We need a list here because otherwise the list is being changed
@@ -216,13 +218,36 @@ def save_module(path, name, parser, pickling=True):
class _ModulePickling(object):
version = 2
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
parser representation classes. For example, the following changes
are regarded as incompatible.
- Class name is changed.
- Class is moved to another module.
- Defined slot of the class is changed.
"""
def __init__(self):
self.__index = None
self.py_version = '%s.%s' % sys.version_info[:2]
self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
"""
Short name to distinguish Python implementations and versions.
It's like `sys.implementation.cache_tag` but for Python < 3.3
we generate something similar. See:
http://docs.python.org/3/library/sys.html#sys.implementation
.. todo:: Detect interpreter (e.g., PyPy).
"""
def load_module(self, path, original_changed_time):
try:
pickle_changed_time = self._index[self.py_version][path]
pickle_changed_time = self._index[path]
except KeyError:
return None
if original_changed_time is not None \
@@ -238,11 +263,12 @@ class _ModulePickling(object):
return parser_cache_item.parser
def save_module(self, path, parser_cache_item):
self.__index = None
try:
files = self._index[self.py_version]
files = self._index
except KeyError:
files = {}
self._index[self.py_version] = files
self._index = files
with open(self._get_hashed_path(path), 'wb') as f:
pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
@@ -255,9 +281,16 @@ class _ModulePickling(object):
if self.__index is None:
try:
with open(self._get_path('index.json')) as f:
self.__index = json.load(f)
data = json.load(f)
except IOError:
self.__index = {}
else:
# 0 means version is not defined (= always delete cache):
if data.get('version', 0) != self.version:
self.delete_cache()
self.__index = {}
else:
self.__index = data['index']
return self.__index
def _remove_old_modules(self):
@@ -268,18 +301,25 @@ class _ModulePickling(object):
self._index # reload index
def _flush_index(self):
data = {'version': self.version, 'index': self._index}
with open(self._get_path('index.json'), 'w') as f:
json.dump(self._index, f)
json.dump(data, f)
self.__index = None
def delete_cache(self):
shutil.rmtree(self._cache_directory())
def _get_hashed_path(self, path):
return self._get_path('%s_%s.pkl' % (self.py_version, hash(path)))
return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())
def _get_path(self, file):
dir = settings.cache_directory
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return dir + os.path.sep + file
return os.path.join(dir, file)
def _cache_directory(self):
return os.path.join(settings.cache_directory, self.py_tag)
# is a singleton
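# Editor's sketch (not part of this commit): with _get_hashed_path and
# _cache_directory above, a parsed module's pickle lands in a per-interpreter
# directory, and index.json stores {'version': 2, 'index': {module_path:
# change_time, ...}}. The cache directory and py_tag values below are
# illustrative only.
import hashlib
import os

def _example_pickle_path(module_path,
                         cache_directory='~/.cache/jedi',
                         py_tag='cpython-27'):
    name = '%s.pkl' % hashlib.md5(module_path.encode('utf-8')).hexdigest()
    return os.path.join(cache_directory, py_tag, name)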

View File

@@ -1,10 +1,11 @@
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import tokenize
import functools
import tokenizer as tokenize
from _compatibility import next
import debug
import settings
from jedi._compatibility import next, reraise
from jedi import settings
FLOWS = ['if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally']
@@ -16,23 +17,47 @@ class MultiLevelStopIteration(Exception):
pass
class MultiLevelAttributeError(Exception):
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
`hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
Therefore this class has to be a `BaseException` and not an `Exception`.
But because I rewrote hasattr, we can now switch back to `Exception`.
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def __init__(self, base=None):
self.base = base
def __str__(self):
import traceback
tb = traceback.format_exception(*self.base)
return 'Original:\n\n' + ''.join(tb)
def rethrow_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
Usage: Put ``@rethrow_uncaught`` in front of a function
which is **not** supposed to raise `AttributeError`.
AttributeError is easily caught by `hasattr` and by another
``except AttributeError`` clause. This becomes a problem when you use
a lot of "dynamic" attributes (e.g., using ``@property``) because you
can't distinguish whether the property does not exist for real or some code
inside the "dynamic" attribute threw that error. In well-written
code, such errors should not exist, but getting there is very
difficult. This decorator helps us get there by changing
`AttributeError` to `UncaughtAttributeError` to avoid unexpected catches.
This helps us notice bugs earlier and facilitates debugging.
.. note:: Treating StopIteration here is easy.
Add that feature when needed.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError:
exc_info = sys.exc_info()
reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
return wrapper
class PushBackIterator(object):
@@ -84,29 +109,10 @@ class NoErrorTokenizer(object):
def __next__(self):
if self.closed:
raise MultiLevelStopIteration()
try:
self.last_previous = self.previous
self.previous = self.current
self.current = next(self.gen)
except tokenize.TokenError:
# We just ignore this error, I try to handle it earlier - as
# good as possible
debug.warning('parentheses not closed error')
return self.__next__()
except IndentationError:
# This is an error, that tokenize may produce, because the code
# is not indented as it should. Here it just ignores this line
# and restarts the parser.
# (This is a rather unlikely error message, for normal code,
# tokenize seems to be pretty tolerant)
debug.warning('indentation error on line %s, ignoring it' %
self.current[2][0])
# add the starting line of the last position
self.offset = self.current[2]
self.gen = PushBackIterator(tokenize.generate_tokens(
self.readline))
return self.__next__()
c = list(self.current)
if c[0] == tokenize.ENDMARKER:
@@ -187,3 +193,13 @@ def indent_block(text, indention=' '):
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
@contextlib.contextmanager
def ignored(*exceptions):
"""Context manager that ignores all of the specified exceptions. This will
be in the standard library starting with Python 3.4."""
try:
yield
except exceptions:
pass
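# Editor's sketch (not part of this commit): `ignored` is what lets the many
# try/except/pass blocks removed throughout this diff collapse to two lines.
def _example_lookup(dct, key):
    value = None
    with ignored(KeyError):
        value = dct[key]
    return value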

View File

@@ -16,10 +16,10 @@ annotations.
import re
import cache
from jedi import cache
from jedi import parsing
import evaluate
import evaluate_representation as er
import parsing
DOCSTRING_PARAM_PATTERNS = [
r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx

View File

@@ -55,16 +55,17 @@ from __future__ import with_statement
import os
import cache
import parsing_representation as pr
import evaluate_representation as er
import modules
import evaluate
import settings
import debug
import imports
from jedi import cache
from jedi import parsing_representation as pr
from jedi import modules
from jedi import settings
from jedi import common
from jedi import debug
from jedi import fast_parser
import api_classes
import fast_parser
import evaluate
import imports
import evaluate_representation as er
# This is something like the sys.path, but only for searching params. It means
# that this is the order in which Jedi searches params.
@@ -487,10 +488,8 @@ def related_name_add_import_modules(definitions, search_name):
for d in definitions:
if isinstance(d.parent, pr.Import):
s = imports.ImportPath(d.parent, direct_resolve=True)
try:
with common.ignored(IndexError):
new.add(s.follow(is_goto=True)[0])
except IndexError:
pass
return set(definitions) | new

View File

@@ -68,21 +68,22 @@ backtracking algorithm.
.. todo:: nonlocal statement, needed or can be ignored? (py3k)
"""
from _compatibility import next, hasattr, is_py3k, unicode, utf8
from __future__ import with_statement
import sys
import itertools
import common
import cache
import parsing_representation as pr
from jedi._compatibility import next, hasattr, is_py3k, unicode, reraise
from jedi import common
from jedi import cache
from jedi import parsing_representation as pr
from jedi import debug
import evaluate_representation as er
import debug
import recursion
import docstrings
import builtin
import imports
import recursion
import dynamic
import docstrings
def get_defined_names_for_position(scope, position=None, start_scope=None):
@@ -179,7 +180,7 @@ def get_names_of_scope(scope, position=None, star_search=True,
yield scope, get_defined_names_for_position(scope,
position, in_func_scope)
except StopIteration:
raise common.MultiLevelStopIteration('StopIteration raised')
reraise(common.MultiLevelStopIteration, sys.exc_info()[2])
if scope.isinstance(pr.ForFlow) and scope.is_list_comp:
# is a list comprehension
yield scope, scope.get_set_vars(is_internal_call=True)
@@ -433,11 +434,9 @@ def find_name(scope, name_str, position=None, search_global=False,
if isinstance(scope, (er.Instance, er.Class)) \
and hasattr(r, 'get_descriptor_return'):
# handle descriptors
try:
with common.ignored(KeyError):
res_new += r.get_descriptor_return(scope)
continue
except KeyError:
pass
res_new.append(r)
return res_new
@@ -466,19 +465,15 @@ def check_getattr(inst, name_str):
# str is important to lose the NamePart!
module = builtin.Builtin.scope
name = pr.Call(module, str(name_str), pr.Call.STRING, (0, 0), inst)
try:
with common.ignored(KeyError):
result = inst.execute_subscope_by_name('__getattr__', [name])
except KeyError:
pass
if not result:
# this is a little bit special. `__getattribute__` is executed
# before anything else. But: I know no use case, where this
# could be practical and the jedi would return wrong types. If
# you ever have something, let me know!
try:
with common.ignored(KeyError):
result = inst.execute_subscope_by_name('__getattribute__', [name])
except KeyError:
pass
return result
@@ -540,10 +535,8 @@ def assign_tuples(tup, results, seek_name):
debug.warning("invalid tuple lookup %s of result %s in %s"
% (tup, results, seek_name))
else:
try:
with common.ignored(IndexError):
types += func(index)
except IndexError:
pass
return types
result = []
@@ -585,12 +578,7 @@ def follow_statement(stmt, seek_name=None):
commands = stmt.get_commands()
debug.dbg('calls: %s' % commands)
try:
result = follow_call_list(commands)
except AttributeError:
# This is so evil! But necessary to propagate errors. The attribute
# errors here must not be catched, because they shouldn't exist.
raise common.MultiLevelAttributeError(sys.exc_info())
# Assignment checking is only important if the statement defines multiple
# variables.
@@ -602,6 +590,7 @@ def follow_statement(stmt, seek_name=None):
return set(result)
@common.rethrow_uncaught
def follow_call_list(call_list, follow_array=False):
"""
`call_list` can be either `pr.Array` or `list of list`.
@@ -656,11 +645,9 @@ def follow_call_list(call_list, follow_array=False):
call = next(calls_iterator)
except StopIteration:
break
try:
with common.ignored(AttributeError):
if str(call.name) == 'else':
break
except AttributeError:
pass
continue
result += follow_call(call)
elif call == '*':

View File

@@ -9,22 +9,23 @@ instantiated. This class represents these cases.
So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
"""
import sys
from __future__ import with_statement
import copy
import itertools
from _compatibility import property, use_metaclass, next, hasattr
import parsing_representation as pr
import imports
from jedi._compatibility import property, use_metaclass, next, hasattr
from jedi import parsing_representation as pr
from jedi import cache
from jedi import helpers
from jedi import debug
from jedi import common
import recursion
import docstrings
import cache
import imports
import evaluate
import builtin
import dynamic
import helpers
import recursion
import debug
import evaluate
import common
class DecoratorNotFound(LookupError):
@@ -62,10 +63,8 @@ class Instance(use_metaclass(cache.CachedMetaClass, Executable)):
else:
# need to execute the __init__ function, because the dynamic param
# searching needs it.
try:
with common.ignored(KeyError):
self.execute_subscope_by_name('__init__', self.var_args)
except KeyError:
pass
# Generated instances are classes that are just generated by self
# (No var_args) used.
self.is_generated = False
@@ -668,6 +667,7 @@ class Execution(Executable):
"""
return self.get_params() + pr.Scope.get_set_vars(self)
@common.rethrow_uncaught
def copy_properties(self, prop):
"""
Literally copies a property of a Function. Copying is very expensive,
@@ -675,7 +675,6 @@ class Execution(Executable):
objects can be used for the executions, as if they were in the
execution.
"""
try:
# Copy all these lists into this local function.
attr = getattr(self.base, prop)
objects = []
@@ -689,8 +688,6 @@ class Execution(Executable):
copied = Function(copied)
objects.append(copied)
return objects
except AttributeError:
raise common.MultiLevelAttributeError(sys.exc_info())
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'imports', '_sub_module']:
@@ -698,8 +695,8 @@ class Execution(Executable):
return getattr(self.base, name)
@cache.memoize_default()
@common.rethrow_uncaught
def _scope_copy(self, scope):
try:
""" Copies a scope (e.g. if) in an execution """
# TODO method uses different scopes than the subscopes property.
@@ -711,8 +708,6 @@ class Execution(Executable):
copied = helpers.fast_parent_copy(scope)
copied.parent = self._scope_copy(copied.parent)
return copied
except AttributeError:
raise common.MultiLevelAttributeError(sys.exc_info())
@property
@cache.memoize_default()
@@ -774,9 +769,13 @@ class Generator(use_metaclass(cache.CachedMetaClass, pr.Base)):
debug.warning('Tried to get array access on a generator', self)
return []
@property
def parent(self):
return self.func.parent
def __getattr__(self, name):
if name not in ['start_pos', 'end_pos', 'parent', 'get_imports',
'asserts', 'doc', 'docstr', 'get_parent_until', 'get_code',
'subscopes']:
raise AttributeError("Accessing %s of %s is not allowed."
% (self, name))
return getattr(self.func, name)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self.func)
@@ -805,10 +804,8 @@ class Array(use_metaclass(cache.CachedMetaClass, pr.Base)):
if isinstance(index, Instance) \
and str(index.name) in ['int', 'str'] \
and len(index.var_args) == 1:
try:
with common.ignored(KeyError, IndexError):
return self.get_exact_index_types(index.var_args[0])
except (KeyError, IndexError):
pass
result = list(self._follow_values(self._array.values))
result += dynamic.check_array_additions(self)

View File

@@ -6,11 +6,11 @@ finished (and still not working as I want), I won't document it any further.
import re
import operator
from _compatibility import use_metaclass, reduce, property
import settings
import parsing
import parsing_representation as pr
import cache
from jedi._compatibility import use_metaclass, reduce, property
from jedi import settings
from jedi import parsing
from jedi import parsing_representation as pr
from jedi import cache
import common

View File

@@ -1,6 +1,9 @@
from __future__ import with_statement
import copy
import parsing_representation as pr
from jedi import common
from jedi import parsing_representation as pr
def fast_parent_copy(obj):
@@ -21,13 +24,11 @@ def fast_parent_copy(obj):
before = ()
for cls in new_obj.__class__.__mro__:
try:
with common.ignored(AttributeError):
if before == cls.__slots__:
continue
before = cls.__slots__
items += [(n, getattr(new_obj, n)) for n in before]
except AttributeError:
pass
for key, value in items:
# replace parent (first try _parent and then parent)
@@ -35,10 +36,8 @@ def fast_parent_copy(obj):
if key == 'parent' and '_parent' in items:
# parent can be a property
continue
try:
with common.ignored(KeyError):
setattr(new_obj, key, new_elements[value])
except KeyError:
pass
elif key in ['parent_function', 'use_as_parent', '_sub_module']:
continue
elif isinstance(value, list):

View File

@@ -5,28 +5,27 @@ any actual importing done. This module is about finding modules in the
filesystem. This can be quite tricky sometimes, because Python imports are not
always that simple.
Currently the import process uses ``imp`` to find modules. In the future, it's
a goal to use ``importlib`` for this purpose. There's a `pull request
<https://github.com/davidhalter/jedi/pull/109>`_ for that.
This module uses imp for Python up to 3.2 and importlib for Python 3.3 onwards; the
correct implementation is delegated to _compatibility.
This module also supports import autocompletion, which means completing
statements like ``from datetim`` (cursor at the end would return ``datetime``).
"""
from __future__ import with_statement
import os
import pkgutil
import imp
import sys
import builtin
import modules
import debug
import parsing_representation as pr
import evaluate
import itertools
import cache
from jedi._compatibility import find_module
from jedi import modules
from jedi import common
from jedi import debug
from jedi import parsing_representation as pr
from jedi import cache
import builtin
import evaluate
# for debugging purposes only
imports_processed = 0
@@ -123,11 +122,9 @@ class ImportPath(pr.Base):
if self.import_stmt.relative_count:
rel_path = self.get_relative_path() + '/__init__.py'
try:
with common.ignored(IOError):
m = modules.Module(rel_path)
names += m.parser.module.get_defined_names()
except IOError:
pass
else:
if on_import_stmt and isinstance(scope, pr.Module) \
and scope.path.endswith('__init__.py'):
@@ -238,20 +235,22 @@ class ImportPath(pr.Base):
global imports_processed
imports_processed += 1
importing = None
if path is not None:
return imp.find_module(string, [path])
importing = find_module(string, [path])
else:
debug.dbg('search_module', string, self.file_path)
# Override the sys.path. It works only good that way.
# Injecting the path directly into `find_module` did not work.
sys.path, temp = sys_path_mod, sys.path
try:
i = imp.find_module(string)
importing = find_module(string)
except ImportError:
sys.path = temp
raise
sys.path = temp
return i
return importing
if self.file_path:
sys_path_mod = list(self.sys_path_with_modifications())
@@ -259,6 +258,9 @@ class ImportPath(pr.Base):
else:
sys_path_mod = list(modules.get_sys_path())
def module_not_found():
raise ModuleNotFound('The module you searched has not been found')
current_namespace = (None, None, None)
# now execute those paths
rest = []
@@ -270,19 +272,19 @@ class ImportPath(pr.Base):
and len(self.import_path) == 1:
# follow `from . import some_variable`
rel_path = self.get_relative_path()
try:
with common.ignored(ImportError):
current_namespace = follow_str(rel_path, '__init__')
except ImportError:
pass
if current_namespace[1]:
rest = self.import_path[i:]
else:
raise ModuleNotFound(
'The module you searched has not been found')
module_not_found()
if current_namespace == (None, None, False):
module_not_found()
sys_path_mod.pop(0) # TODO why is this here?
path = current_namespace[1]
is_package_directory = current_namespace[2][2] == imp.PKG_DIRECTORY
is_package_directory = current_namespace[2]
f = None
if is_package_directory or current_namespace[0]:

View File

@@ -1,9 +1,12 @@
import keyword
from _compatibility import is_py3k
import builtin
from __future__ import with_statement
import pydoc
import keyword
from jedi._compatibility import is_py3k
from jedi import common
import builtin
try:
from pydoc_data import topics as pydoc_topics
except ImportError:
@@ -63,12 +66,10 @@ def imitate_pydoc(string):
# with unicode strings)
string = str(string)
h = pydoc.help
try:
with common.ignored(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
except KeyError:
pass
get_target = lambda s: h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):

View File

@@ -15,19 +15,19 @@ Apart from those classes there's a ``sys.path`` fetching function, as well as
"""
from __future__ import with_statement
from _compatibility import exec_function, unicode, is_py25, literal_eval
import re
import tokenize
import tokenizer as tokenize
import sys
import os
import cache
import parsing
import parsing_representation as pr
import fast_parser
import debug
import settings
from jedi._compatibility import exec_function, unicode, is_py25, literal_eval
from jedi import cache
from jedi import parsing
from jedi import parsing_representation as pr
from jedi import fast_parser
from jedi import debug
from jedi import settings
from jedi import common
class CachedModule(object):
@@ -108,11 +108,9 @@ class ModuleWithCursor(Module):
def parser(self):
""" get the parser lazy """
if not self._parser:
try:
with common.ignored(KeyError):
parser = cache.parser_cache[self.path].parser
cache.invalidate_star_import_cache(parser.module)
except KeyError:
pass
# Call the parser already here, because it will be used anyways.
# Also, the position is here important (which will not be used by
# default), therefore fill the cache here.
@@ -279,9 +277,8 @@ def get_sys_path():
venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
sys_path.insert(0, p)
p = sys.path[1:]
check_virtual_env(p)
return p
check_virtual_env(sys.path)
return [p for p in sys.path if p != ""]
@cache.memoize_default([])
@@ -350,10 +347,8 @@ def sys_path_with_modifications(module):
return [] # support for modules without a path is intentionally bad.
curdir = os.path.abspath(os.curdir)
try:
with common.ignored(OSError):
os.chdir(os.path.dirname(module.path))
except OSError:
pass
result = check_module(module)
result += detect_django_path(module.path)
@@ -374,12 +369,10 @@ def detect_django_path(module_path):
else:
module_path = new
try:
with common.ignored(IOError):
with open(module_path + os.path.sep + 'manage.py'):
debug.dbg('Found django path: %s' % module_path)
result.append(module_path)
except IOError:
pass
return result

View File

@@ -15,14 +15,15 @@ within the statement. This lowers memory usage and cpu time and reduces the
complexity of the ``Parser`` (there's another parser sitting inside
``Statement``, which produces ``Array`` and ``Call``).
"""
from _compatibility import next, StringIO
from __future__ import with_statement
import tokenize
import tokenizer as tokenize
import keyword
import debug
import common
import parsing_representation as pr
from jedi._compatibility import next, StringIO
from jedi import debug
from jedi import common
from jedi import parsing_representation as pr
class ParserError(Exception):
@@ -395,10 +396,11 @@ class Parser(object):
stmt = stmt_class(self.module, set_vars, used_vars, tok_list,
first_pos, self.end_pos)
stmt.parent = self.top_module
self._check_user_stmt(stmt)
# Attribute docstring (PEP 257) support
try:
with common.ignored(IndexError, AttributeError):
# If string literal is being parsed
first_tok = stmt.token_list[0]
if (not stmt.set_vars and
@@ -407,8 +409,6 @@ class Parser(object):
first_tok[0] == tokenize.STRING):
# ... then set it as a docstring
self.scope.statements[-1].add_docstr(first_tok[1])
except (IndexError, AttributeError):
pass
if tok in always_break + not_first_break:
self._gen.push_last_back()

View File

@@ -33,15 +33,16 @@ statements in this scope. Check this out:
See also :attr:`Scope.subscopes` and :attr:`Scope.statements`.
"""
from __future__ import with_statement
import os
import re
import tokenize
import tokenizer as tokenize
from _compatibility import next, literal_eval, cleandoc, Python3Method, \
from jedi._compatibility import next, literal_eval, cleandoc, Python3Method, \
encoding, property, unicode, is_py3k
import common
import debug
from jedi import common
from jedi import debug
class Base(object):
@@ -564,8 +565,10 @@ class Flow(Scope):
@parent.setter
def parent(self, value):
self._parent = value
if self.next:
try:
self.next.parent = value
except AttributeError:
return
def get_code(self, first_indent=False, indention=' '):
stmts = []
@@ -1282,11 +1285,8 @@ class Array(Call):
inner = []
for i, stmt in enumerate(self.values):
s = ''
try:
with common.ignored(IndexError):
key = self.keys[i]
except IndexError:
pass
else:
s += key.get_code(new_line=False) + ': '
s += stmt.get_code(new_line=False)
inner.append(s)

View File

@@ -7,11 +7,11 @@ Next to :mod:`cache` this module also makes |jedi| not thread-safe. Why?
``ExecutionRecursionDecorator`` uses class variables to count the function
calls.
"""
import parsing_representation as pr
from jedi import parsing_representation as pr
from jedi import debug
from jedi import settings
import evaluate_representation as er
import debug
import builtin
import settings
class RecursionDecorator(object):

View File

@@ -12,13 +12,14 @@ following functions (sometimes bug-prone):
- extract variable
- inline variable
"""
from __future__ import with_statement
import modules
import difflib
import helpers
import parsing_representation as pr
from jedi import common
from jedi import modules
from jedi import helpers
from jedi import parsing_representation as pr
class Refactoring(object):
@@ -167,7 +168,7 @@ def inline(script):
dct = {}
definitions = script.goto()
try:
with common.ignored(AssertionError):
assert len(definitions) == 1
stmt = definitions[0].definition
related_names = script.related_names()
@@ -201,7 +202,4 @@ def inline(script):
else:
new_lines.pop(index)
except AssertionError:
pass
return Refactoring(dct)

270
jedi/tokenizer.py Normal file
View File

@@ -0,0 +1,270 @@
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer. The reason was simple: the standard library tokenizer fails
if the indentation is not right. The fast parser of jedi, however, requires
"wrong" indentation.
Basically this is a stripped down version of the standard library module, so
you can read the documentation there.
"""
import string
import re
from token import *
from codecs import lookup, BOM_UTF8
import collections
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
namechars = string.ascii_letters + '_'
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
def __repr__(self):
annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
self._replace(type=annotated_type))
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
tokenprog, pseudoprog, single3prog, double3prog = map(
_compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": _compile(Single), '"': _compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None, 'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ):
single_quoted[t] = t
del _compile
tabsize = 8
class TokenError(Exception): pass
def generate_tokens(readline):
lnum = parenlev = continued = 0
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while True: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = b''
lnum += 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
# multiline string has not been finished
break
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield TokenInfo(ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield TokenInfo(COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield TokenInfo(NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
indents = indents[:-1]
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
# basically a statement has not been finished here.
break
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield TokenInfo(NL if parenlev > 0 else NEWLINE,
token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield TokenInfo(NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos + 1), line)
pos += 1
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
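# Editor's sketch (not part of this commit): the generator is driven the same
# way as the stdlib tokenize module, i.e. with a readline callable. Assumes a
# unicode source string and Python >= 2.6 for the io module.
def _example_tokenize(source):
    from io import StringIO
    # Collect all TokenInfo tuples produced for the given source.
    return list(generate_tokens(StringIO(source).readline))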

10
pytest.ini Normal file
View File

@@ -0,0 +1,10 @@
[pytest]
addopts = --doctest-modules
# Ignore broken files in blackbox test directories
norecursedirs = .* docs completion refactor
# Activate `clean_jedi_cache` fixture for all tests. This should be
# fine as long as we are using `clean_jedi_cache` as a session scoped
# fixture.
usefixtures = clean_jedi_cache

View File

@@ -31,7 +31,13 @@ setup(name='jedi',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Editors :: Integrated Development Environments (IDE)',
'Topic :: Utilities',

View File

@@ -1,4 +1,3 @@
import time
import sys
if sys.hexversion < 0x02070000:
import unittest2 as unittest
@@ -8,48 +7,17 @@ import os
from os.path import abspath, dirname
import functools
test_dir = dirname(abspath(__file__))
root_dir = dirname(test_dir)
sys.path.insert(0, root_dir)
import pytest
import jedi
from jedi import debug
test_sum = 0
t_start = time.time()
# Sorry I didn't use argparse here. It's because argparse is not in the
# stdlib in 2.5.
args = sys.argv[1:]
print_debug = False
try:
i = args.index('--debug')
args = args[:i] + args[i + 1:]
except ValueError:
pass
else:
print_debug = True
jedi.set_debug_function(debug.print_to_stdout)
sys.argv = sys.argv[:1] + args
summary = []
tests_fail = 0
from jedi._compatibility import is_py25
def get_test_list():
# get test list, that should be executed
test_files = {}
last = None
for arg in sys.argv[1:]:
if arg.isdigit():
if last is None:
continue
test_files[last].append(int(arg))
else:
test_files[arg] = []
last = arg
return test_files
test_dir = dirname(abspath(__file__))
root_dir = dirname(test_dir)
sample_int = 1 # This is used in completion/imports.py
class TestBase(unittest.TestCase):
@@ -76,13 +44,6 @@ class TestBase(unittest.TestCase):
return script.function_definition()
def print_summary():
print('\nSummary: (%s fails of %s tests) in %.3fs' % \
(tests_fail, test_sum, time.time() - t_start))
for s in summary:
print(s)
def cwd_at(path):
"""
Decorator to run function at `path`.
@@ -102,3 +63,32 @@ def cwd_at(path):
os.chdir(oldcwd)
return wrapper
return decorator
_py25_fails = 0
py25_allowed_fails = 9
def skip_py25_fails(func):
"""
Skip first `py25_allowed_fails` failures in Python 2.5.
.. todo:: Remove this decorator by implementing "skip tag" for
integration tests.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
global _py25_fails
try:
func(*args, **kwds)
except AssertionError:
_py25_fails += 1
if _py25_fails > py25_allowed_fails:
raise
else:
pytest.skip("%d-th failure (there can be %d failures)" %
(_py25_fails, py25_allowed_fails))
return wrapper
if not is_py25:
skip_py25_fails = lambda f: f

View File

@@ -154,9 +154,9 @@ mod1.a
from .. import base
#? int()
base.tests_fail
base.sample_int
from ..base import tests_fail as f
from ..base import sample_int as f
#? int()
f

View File

@@ -89,12 +89,3 @@ def huhu(db):
"""
#? sqlite3.Connection()
db
# -----------------
# various regression tests
# -----------------
#62
import threading
#? ['_Verbose', '_VERBOSE']
threading._Verbose

View File

@@ -1,13 +1,23 @@
from os.path import join, dirname, abspath
default_base_dir = join(dirname(abspath(__file__)), 'completion')
import os
import shutil
import tempfile
import run
import pytest
from . import base
from . import run
from . import refactor
def pytest_addoption(parser):
parser.addoption(
"--base-dir", default=default_base_dir,
"--integration-case-dir",
default=os.path.join(base.test_dir, 'completion'),
help="Directory in which integration test case files locate.")
parser.addoption(
"--refactor-case-dir",
default=os.path.join(base.test_dir, 'refactor'),
help="Directory in which refactoring test case files locate.")
parser.addoption(
"--test-files", "-T", default=[], action='append',
help=(
@@ -15,7 +25,7 @@ def pytest_addoption(parser):
"For example: -T generators.py:10,13,19. "
"Note that you can use -m to specify the test case by id."))
parser.addoption(
"--thirdparty",
"--thirdparty", action='store_true',
help="Include integration tests that requires third party modules.")
@@ -38,11 +48,52 @@ def pytest_generate_tests(metafunc):
"""
:type metafunc: _pytest.python.Metafunc
"""
if 'case' in metafunc.fixturenames:
base_dir = metafunc.config.option.base_dir
test_files = dict(map(parse_test_files_option,
metafunc.config.option.test_files))
if 'case' in metafunc.fixturenames:
base_dir = metafunc.config.option.integration_case_dir
thirdparty = metafunc.config.option.thirdparty
cases = list(run.collect_dir_tests(base_dir, test_files))
if thirdparty:
cases.extend(run.collect_dir_tests(
os.path.join(base_dir, 'thirdparty'), test_files, True))
metafunc.parametrize('case', cases)
if 'refactor_case' in metafunc.fixturenames:
base_dir = metafunc.config.option.refactor_case_dir
metafunc.parametrize(
'case',
run.collect_dir_tests(base_dir, test_files, thirdparty))
'refactor_case',
refactor.collect_dir_tests(base_dir, test_files))
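``parse_test_files_option`` itself is not shown in this hunk. Judging from how it is used above, it presumably turns a ``-T`` value of the form ``FILE[:LINE[,LINE...]]`` into a ``(file_name, [lines])`` pair, roughly like this (a sketch under that assumption, not the verbatim implementation)::

    def parse_test_files_option(opt):
        """Parse an option value such as 'generators.py:10,13,19'."""
        opt = str(opt)
        if ':' in opt:
            f_name, rest = opt.split(':', 1)
            return f_name, [int(n) for n in rest.split(',')]
        else:
            return opt, []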
@pytest.fixture()
def isolated_jedi_cache(monkeypatch, tmpdir):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
Same as `clean_jedi_cache`, but creates the temporary directory for
each test case (scope='function').
"""
settings = base.jedi.settings
monkeypatch.setattr(settings, 'cache_directory', str(tmpdir))
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
Note that you can't use the built-in `tmpdir` and `monkeypatch`
fixtures here because their scope is 'function', which cannot be used
from a 'session'-scoped fixture.
This fixture is activated in ../pytest.ini.
"""
settings = base.jedi.settings
old = settings.cache_directory
tmp = tempfile.mkdtemp(prefix='jedi-test-')
settings.cache_directory = tmp
@request.addfinalizer
def restore():
settings.cache_directory = old
shutil.rmtree(tmp)
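A small usage sketch of the function-scoped fixture (a hypothetical test, shown only to illustrate how it would be requested; the API calls mirror those used elsewhere in this commit)::

    import pytest
    import jedi

    @pytest.mark.usefixtures("isolated_jedi_cache")
    def test_completion_with_private_cache():
        # Anything jedi caches during this test goes to a per-test temporary
        # directory, so the user's real cache directory is never touched.
        source = "import os; os.path."
        script = jedi.Script(source, 1, len(source), None)
        assert script.complete()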

View File

@@ -4,13 +4,8 @@ Refactoring tests work a little bit like Black Box tests. But the idea
here is to compare two versions of code.
"""
from __future__ import with_statement
import sys
import os
import traceback
import re
import itertools
import base
from jedi._compatibility import reduce
import jedi
@@ -64,7 +59,7 @@ class RefactoringCase(object):
self.name, self.line_nr - 1)
def collect_file_tests(source, f_name, lines_to_execute):
def collect_file_tests(source, path, lines_to_execute):
r = r'^# --- ?([^\n]*)\n((?:(?!\n# \+\+\+).)*)' \
r'\n# \+\+\+((?:(?!\n# ---).)*)'
for match in re.finditer(r, source, re.DOTALL | re.MULTILINE):
@@ -86,7 +81,6 @@ def collect_file_tests(source, f_name, lines_to_execute):
if lines_to_execute and line_nr - 1 not in lines_to_execute:
continue
path = os.path.join(os.path.abspath(refactoring_test_dir), f_name)
yield RefactoringCase(name, source, line_nr, index, path,
new_name, start_line_test, second)
@@ -96,65 +90,8 @@ def collect_dir_tests(base_dir, test_files):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
if f_name.endswith(".py") and (not test_files or files_to_execute):
path = os.path.join(refactoring_test_dir, f_name)
path = os.path.join(base_dir, f_name)
with open(path) as f:
source = f.read()
for case in collect_file_tests(source, f_name, lines_to_execute):
for case in collect_file_tests(source, path, lines_to_execute):
yield case
def run_test(cases):
"""
This is the completion test for some cases. The tests are not
unit-test-like; they are rather integration tests.
It uses comments to specify a test in the next line. The comment also says
which results are expected. The comment always begins with `#?`. The last
row symbolizes the cursor.
For example::
#? ['ab']
ab = 3; a
#? int()
ab = 3; ab
"""
fails = 0
tests = 0
for case in cases:
try:
if not case.check():
print(case)
print(' ' + repr(str(case.result)))
print(' ' + repr(case.desired))
fails += 1
except Exception:
print(traceback.format_exc())
print(case)
fails += 1
tests += 1
return tests, fails
def test_dir(refactoring_test_dir):
for (path, cases) in itertools.groupby(
collect_dir_tests(refactoring_test_dir, test_files),
lambda case: case.path):
num_tests, fails = run_test(cases)
base.test_sum += num_tests
f_name = os.path.basename(path)
s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
base.tests_fail += fails
print(s)
base.summary.append(s)
if __name__ == '__main__':
refactoring_test_dir = os.path.join(base.test_dir, 'refactor')
test_files = base.get_test_list()
test_dir(refactoring_test_dir)
base.print_summary()
sys.exit(1 if base.tests_fail else 0)
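Judging from the regular expression in ``collect_file_tests`` above, a refactoring case file consists of a ``# --- <name>`` header, the original code, a ``# +++`` separator, and the expected result; how the cursor position and the new name are marked inside the first block is handled by code outside this hunk. A hedged sketch of the overall layout (not copied from an actual file in ``test/refactor``)::

    # --- rename function
    def old():
        return old()
    # +++
    def new():
        return new()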

View File

@@ -17,62 +17,40 @@ There are different kinds of tests:
How to run tests?
+++++++++++++++++
Basically ``run.py`` searches the ``completion`` directory for files with lines
starting with the symbol above. There is also support for third-party
libraries. In a normal test run (``./run.py``) they are not executed; you
have to provide the ``--thirdparty`` option.
Jedi uses pytest_ to run unit and integration tests. To run tests,
simply run ``py.test``. You can also use tox_ to run tests for
multiple Python versions.
Now it's much more important that you know how to test only one file
(``./run.py classes``, where ``classes`` is the name of the file to test) or even
one test (``./run.py classes 90``, which would just execute the test on line 90).
.. _pytest: http://pytest.org
.. _tox: http://testrun.org/tox
If you want to debug a test, just use the --debug option.
Integration test cases are located in the ``test/completion`` directory
and each test case is indicated by the comment ``#?`` (complete /
definitions), ``#!`` (assignments) and ``#<`` (usages). There is also
support for third-party libraries. In a normal test run they are not
executed; you have to provide the ``--thirdparty`` option.
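To illustrate the first two markers, here are the examples used in the docstrings further down in this file, collected in one place::

    #? ['ab']
    ab = 3; a

    #? int()
    ab = 3; ab

    abc = 1
    #! 2 ['abc=1']
    abc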
In addition to the standard `-k` and `-m` options in py.test, you can use
the `-T` (`--test-files`) option to specify integration test cases to run.
It takes the format ``FILE_NAME[:LINE[,LINE[,...]]]`` where
``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is the line
number of the test comment. Here are some recipes:
Run tests only in ``basic.py`` and ``imports.py``::
py.test test/test_integration.py -T basic.py -T imports.py
Run test at line 4, 6, and 8 in ``basic.py``::
py.test test/test_integration.py -T basic.py:4,6,8
See ``py.test --help`` for more information.
If you want to debug a test, just use the --pdb option.
Auto-Completion
+++++++++++++++
.. autofunction:: run_completion_test
Definition
++++++++++
.. autofunction:: run_definition_test
Goto
++++
.. autofunction:: run_goto_test
Related Names
+++++++++++++
.. autofunction:: run_related_name_test
"""
import os
import sys
import re
import traceback
import itertools
import base
from jedi._compatibility import unicode, StringIO, reduce, literal_eval, is_py25
import jedi
from jedi import debug
sys.path.pop(0) # pop again, because it might affect the completion
TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
def run_completion_test(case):
"""
Uses comments to specify a test in the next line. The comment says which
results are expected. The comment always begins with `#?`. The last row
symbolizes the cursor.
@@ -85,74 +63,18 @@ def run_completion_test(case):
Because it follows ``a.rea`` and a is an ``int``, which has a ``real``
property.
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
completions = script.complete()
#import cProfile; cProfile.run('script.complete()')
Definition
++++++++++
comp_str = set([c.word for c in completions])
if comp_str != set(literal_eval(correct)):
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
return 1
return 0
def run_definition_test(case):
"""
Definition tests use the same symbols as completion tests. This is
possible because the completion tests are defined with a list::
#? int()
ab = 3; ab
Returns 1 for fail and 0 for success.
"""
def definition(correct, correct_start, path):
def defs(line_nr, indent):
s = jedi.Script(script.source, line_nr, indent, path)
return set(s.definition())
Goto
++++
should_be = set()
number = 0
for index in re.finditer('(?: +|$)', correct):
if correct == ' ':
continue
# -1 for the comment, +3 because of the comment start `#? `
start = index.start()
if base.print_debug:
jedi.set_debug_function(None)
number += 1
try:
should_be |= defs(line_nr - 1, start + correct_start)
except Exception:
print('could not resolve %s indent %s' % (line_nr - 1, start))
raise
if base.print_debug:
jedi.set_debug_function(debug.print_to_stdout)
# because the objects have different ids, `repr` it, then compare it.
should_str = set(r.desc_with_module for r in should_be)
if len(should_str) < number:
raise Exception('Solution @%s not right, too few test results: %s'
% (line_nr - 1, should_str))
return should_str
(correct, line_nr, column, start, line) = \
(case.correct, case.line_nr, case.column, case.start, case.line)
script = case.script()
should_str = definition(correct, start, script.source_path)
result = script.definition()
is_str = set(r.desc_with_module for r in result)
if is_str != should_str:
print('Solution @%s not right, received %s, wanted %s' \
% (line_nr - 1, is_str, should_str))
return 1
return 0
def run_goto_test(case):
"""
Tests look like this::
abc = 1
@@ -165,51 +87,26 @@ def run_goto_test(case):
#! 2 ['abc=1']
abc
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.goto()
comp_str = str(sorted(str(r.description) for r in result))
if comp_str != correct:
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
return 1
return 0
Related Names
+++++++++++++
def run_related_name_test(case):
"""
Tests look like this::
abc = 1
#< abc@1,0 abc@3,0
abc
Returns 1 for fail and 0 for success.
"""
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.related_names()
correct = correct.strip()
compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
for r in result)
wanted = []
if not correct:
positions = []
else:
positions = literal_eval(correct)
for pos_tup in positions:
if type(pos_tup[0]) == str:
# this means that there is a module specified
wanted.append(pos_tup)
else:
wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))
import os
import re
wanted = sorted(wanted)
if compare != wanted:
print('Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, compare, wanted))
return 1
return 0
import jedi
from jedi._compatibility import unicode, StringIO, reduce, is_py25
TEST_COMPLETIONS = 0
TEST_DEFINITIONS = 1
TEST_ASSIGNMENTS = 2
TEST_USAGES = 3
class IntegrationTestCase(object):
@@ -223,6 +120,7 @@ class IntegrationTestCase(object):
self.start = start
self.line = line
self.path = path
self.skip = None
def __repr__(self):
name = os.path.basename(self.path) if self.path else None
@@ -274,7 +172,7 @@ def collect_file_tests(lines, lines_to_execute):
correct = None
def collect_dir_tests(base_dir, test_files, thirdparty=False):
def collect_dir_tests(base_dir, test_files, check_thirdparty=False):
for f_name in os.listdir(base_dir):
files_to_execute = [a for a in test_files.items() if a[0] in f_name]
lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
@@ -283,93 +181,23 @@ def collect_dir_tests(base_dir, test_files, thirdparty=False):
# only has these features partially.
if is_py25 and f_name in ['generators.py', 'types.py']:
continue
path = os.path.join(base_dir, f_name)
source = open(path).read()
for case in collect_file_tests(StringIO(source),
lines_to_execute):
case.path = path
case.source = source
yield case
def run_test(cases):
"""
This is the completion test for some cases. The tests are not
unit-test-like; they are rather integration tests.
"""
testers = {
TEST_COMPLETIONS: run_completion_test,
TEST_DEFINITIONS: run_definition_test,
TEST_ASSIGNMENTS: run_goto_test,
TEST_USAGES: run_related_name_test,
}
tests = 0
fails = 0
for case in cases:
tests += 1
try:
fails += testers[case.test_type](case)
except Exception:
print(traceback.format_exc())
print(case)
fails += 1
return tests, fails
def test_dir(completion_test_dir, thirdparty=False):
for (path, cases) in itertools.groupby(
collect_dir_tests(completion_test_dir, test_files, thirdparty),
lambda case: case.path):
f_name = os.path.basename(path)
if thirdparty:
skip = None
if check_thirdparty:
lib = f_name.replace('_.py', '')
try:
# there is always an underscore at the end.
# It looks like: completion/thirdparty/pylab_.py
__import__(lib)
except ImportError:
base.summary.append('Thirdparty-Library %s not found.' %
f_name)
continue
skip = 'Thirdparty-Library %s not found.' % lib
num_tests, fails = run_test(cases)
base.test_sum += num_tests
s = 'run %s tests with %s fails (%s)' % (num_tests, fails, f_name)
base.tests_fail += fails
print(s)
base.summary.append(s)
if __name__ == '__main__':
try:
i = sys.argv.index('--thirdparty')
thirdparty = True
sys.argv = sys.argv[:i] + sys.argv[i + 1:]
except ValueError:
thirdparty = False
test_files = base.get_test_list()
# completion tests:
completion_test_dir = os.path.join(base.test_dir, 'completion')
# execute tests
test_dir(completion_test_dir)
if test_files or thirdparty:
completion_test_dir += '/thirdparty'
test_dir(completion_test_dir, thirdparty=True)
base.print_summary()
#from guppy import hpy
#hpy()
#print hpy().heap()
exit_code = 1 if base.tests_fail else 0
if sys.hexversion < 0x02060000 and base.tests_fail <= 9:
# Python 2.5 has major incompatibilities (e.g. no property.setter),
# therefore it is not possible to pass all tests.
exit_code = 0
sys.exit(exit_code)
path = os.path.join(base_dir, f_name)
source = open(path).read()
for case in collect_file_tests(StringIO(source),
lines_to_execute):
case.path = path
case.source = source
if skip:
case.skip = skip
yield case

View File

@@ -1,8 +0,0 @@
set -e
python regression.py
python run.py
echo
python refactor.py
echo
nosetests --with-doctest --doctest-tests ../jedi/

53
test/test_api_classes.py Normal file
View File

@@ -0,0 +1,53 @@
import textwrap
import pytest
from jedi import api
def make_definitions():
"""
Return a list of definitions for parametrized tests.
:rtype: [jedi.api_classes.BaseDefinition]
"""
source = textwrap.dedent("""
import sys
class C:
pass
x = C()
def f():
pass
def g():
yield
h = lambda: None
""")
definitions = []
definitions += api.defined_names(source)
source += textwrap.dedent("""
variable = sys or C or x or f or g or g() or h""")
lines = source.splitlines()
script = api.Script(source, len(lines), len('variable'), None)
definitions += script.definition()
script2 = api.Script(source, 4, len('class C'), None)
definitions += script2.related_names()
source_param = "def f(a): return a"
script_param = api.Script(source_param, 1, len(source_param), None)
definitions += script_param.goto()
return definitions
@pytest.mark.parametrize('definition', make_definitions())
def test_basedefinition_type(definition):
assert definition.type in ('module', 'class', 'instance', 'function',
'generator', 'statement', 'import', 'param')

54
test/test_cache.py Normal file
View File

@@ -0,0 +1,54 @@
import pytest
from jedi import settings
from jedi.cache import ParserCacheItem, _ModulePickling
ModulePickling = _ModulePickling()
def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
"""
ModulePickling should not save old cache when cache_directory is changed.
See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
"""
dir_1 = str(tmpdir.mkdir('first'))
dir_2 = str(tmpdir.mkdir('second'))
item_1 = ParserCacheItem('fake parser 1')
item_2 = ParserCacheItem('fake parser 2')
path_1 = 'fake path 1'
path_2 = 'fake path 2'
monkeypatch.setattr(settings, 'cache_directory', dir_1)
ModulePickling.save_module(path_1, item_1)
cached = load_stored_item(ModulePickling, path_1, item_1)
assert cached == item_1.parser
monkeypatch.setattr(settings, 'cache_directory', dir_2)
ModulePickling.save_module(path_2, item_2)
cached = load_stored_item(ModulePickling, path_1, item_1)
assert cached is None
def load_stored_item(cache, path, item):
"""Load `item` stored at `path` in `cache`."""
return cache.load_module(path, item.change_time - 1)
@pytest.mark.usefixtures("isolated_jedi_cache")
def test_modulepickling_delete_incompatible_cache():
item = ParserCacheItem('fake parser')
path = 'fake path'
cache1 = _ModulePickling()
cache1.version = 1
cache1.save_module(path, item)
cached1 = load_stored_item(cache1, path, item)
assert cached1 == item.parser
cache2 = _ModulePickling()
cache2.version = 2
cached2 = load_stored_item(cache2, path, item)
assert cached2 is None

View File

@@ -1,23 +1,38 @@
import os
import re
from run import \
import pytest
from . import base
from .run import \
TEST_COMPLETIONS, TEST_DEFINITIONS, TEST_ASSIGNMENTS, TEST_USAGES
import jedi
from jedi._compatibility import literal_eval
def assert_case_equal(case, actual, desired):
"""
Assert ``actual == desired`` with formatted message.
This is not needed for the typical py.test use case, but as we need
``--assert=plain`` (see ../pytest.ini) to work around an issue
caused by py.test magic, let's format the message by hand.
"""
assert actual == desired, """
Test %r failed.
actual = %s
desired = %s
""" % (case, actual, desired)
def run_completion_test(case):
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
completions = script.complete()
#import cProfile; cProfile.run('script.complete()')
comp_str = set([c.word for c in completions])
if comp_str != set(literal_eval(correct)):
raise AssertionError(
'Solution @%s not right, received %s, wanted %s'\
% (line_nr - 1, comp_str, correct))
assert_case_equal(case, comp_str, set(literal_eval(correct)))
def run_definition_test(case):
@@ -52,19 +67,14 @@ def run_definition_test(case):
should_str = definition(correct, start, script.source_path)
result = script.definition()
is_str = set(r.desc_with_module for r in result)
if is_str != should_str:
raise AssertionError(
'Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, is_str, should_str))
assert_case_equal(case, is_str, should_str)
def run_goto_test(case):
(script, correct, line_nr) = (case.script(), case.correct, case.line_nr)
result = script.goto()
comp_str = str(sorted(str(r.description) for r in result))
if comp_str != correct:
raise AssertionError('Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, comp_str, correct))
assert_case_equal(case, comp_str, correct)
def run_related_name_test(case):
@@ -85,14 +95,13 @@ def run_related_name_test(case):
else:
wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))
wanted = sorted(wanted)
if compare != wanted:
raise AssertionError('Solution @%s not right, received %s, wanted %s'
% (line_nr - 1, compare, wanted))
assert_case_equal(case, compare, sorted(wanted))
def test_integration(case, monkeypatch, pytestconfig):
repo_root = os.path.dirname(os.path.dirname(pytestconfig.option.base_dir))
if case.skip is not None:
pytest.skip(case.skip)
repo_root = base.root_dir
monkeypatch.chdir(os.path.join(repo_root, 'jedi'))
testers = {
TEST_COMPLETIONS: run_completion_test,
@@ -100,4 +109,15 @@ def test_integration(case, monkeypatch, pytestconfig):
TEST_ASSIGNMENTS: run_goto_test,
TEST_USAGES: run_related_name_test,
}
testers[case.test_type](case)
base.skip_py25_fails(testers[case.test_type])(case)
def test_refactor(refactor_case):
"""
Run refactoring test case.
:type refactor_case: :class:`.refactor.RefactoringCase`
"""
refactor_case.run()
assert_case_equal(refactor_case,
refactor_case.result, refactor_case.desired)

View File

@@ -11,12 +11,12 @@ import itertools
import os
import textwrap
from base import TestBase, unittest, cwd_at
from .base import TestBase, unittest, cwd_at
import jedi
from jedi._compatibility import is_py25, utf8, unicode
from jedi import api
from jedi import api_classes
api_classes = api.api_classes
#jedi.set_debug_function(jedi.debug.print_to_stdout)
@@ -317,7 +317,7 @@ class TestRegression(TestBase):
# attributes
objs = itertools.chain.from_iterable(r.follow_definition() for r in c)
types = [o.type for o in objs]
assert 'Import' not in types and 'Class' in types
assert 'import' not in types and 'class' in types
def test_keyword_definition_doc(self):
""" github jedi-vim issue #44 """
@@ -512,7 +512,7 @@ class TestSpeed(TestBase):
return wrapper
return decorated
@_check_speed(0.1)
@_check_speed(0.2)
def test_os_path_join(self):
s = "from posixpath import join; join('', '')."
assert len(self.complete(s)) > 10 # is a str completion
@@ -524,5 +524,55 @@ class TestSpeed(TestBase):
script.function_definition()
#print(jedi.imports.imports_processed)
def test_settings_module():
"""
jedi.settings and jedi.cache.settings must be the same module.
"""
from jedi import cache
from jedi import settings
assert cache.settings is settings
def test_no_duplicate_modules():
"""
Make sure that the import hack works as expected.
Jedi does an import hack (see: jedi/__init__.py) to have submodules
with circular dependencies. The modules in this circular dependency
"loop" must be imported by ``import <module>`` rather than the normal
``from jedi import <module>`` (or ``from . jedi ...``). This test
makes sure that this is satisfied.
See also:
- `#160 <https://github.com/davidhalter/jedi/issues/160>`_
- `#161 <https://github.com/davidhalter/jedi/issues/161>`_
"""
import sys
jedipath = os.path.dirname(os.path.abspath(jedi.__file__))
def is_submodule(m):
try:
filepath = m.__file__
except AttributeError:
return False
return os.path.abspath(filepath).startswith(jedipath)
modules = list(filter(is_submodule, sys.modules.values()))
top_modules = [m for m in modules if not m.__name__.startswith('jedi.')]
for m in modules:
if m is jedi:
# py.test automatically imports `jedi.*` when --doctest-modules
# is given. So this test cannot succeed for `jedi` itself.
continue
for tm in top_modules:
try:
imported = getattr(m, tm.__name__)
except AttributeError:
continue
assert imported is tm
if __name__ == '__main__':
unittest.main()

18
tox.ini
View File

@@ -1,16 +1,10 @@
[tox]
envlist = py25, py26, py27, py32
envlist = py25, py26, py27, py32, py33
[testenv]
setenv =
XDG_CACHE_HOME={envtmpdir}/cache
deps =
nose
pytest
commands =
python regression.py
python run.py
python refactor.py
nosetests --with-doctest --doctest-tests {toxinidir}/jedi
changedir = test
py.test []
[testenv:py25]
deps =
simplejson
@@ -20,3 +14,9 @@ deps =
deps =
unittest2
{[testenv]deps}
[testenv:cov]
deps =
pytest-cov
{[testenv]deps}
commands =
py.test --cov jedi []
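The new ``cov`` environment is presumably run locally with ``tox -e cov``. To get a similar coverage report without tox (assuming a working checkout and the dependencies below)::

    pip install pytest pytest-cov
    cd test
    py.test --cov jedi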