merge with dev branch

Mirror of https://github.com/davidhalter/jedi.git (synced 2025-12-08 23:04:48 +08:00)
@@ -75,6 +75,9 @@ Jedi really understands your Python code. For a comprehensive list what Jedi can
do, see: https://jedi.readthedocs.org/en/latest/docs/features.html. A list of
caveats can be found on the same page.

You can run Jedi on cPython 2.6, 2.7, 3.2 or 3.3, but it should also
understand/parse code older than those versions.

Tips on how to use Jedi efficiently can be found here:
https://jedi.readthedocs.org/en/latest/docs/recipes.html

@@ -102,14 +105,12 @@ The test suite depends on ``tox`` and ``pytest``::

To run the tests for all supported Python versions::

    PIP_INSECURE=t tox
    tox

If you want to test only a specific Python version (e.g. Python 2.7), it's as
easy as ::

    tox -e py27

The ``PIP_INSECURE=t`` env variable is only needed for the ``py25`` target.

Tests are also run automatically on `Travis CI
<https://travis-ci.org/davidhalter/jedi/>`_.

@@ -9,7 +9,7 @@ Features and Caveats
General Features
----------------

- python 2.5+ and 3.2+ support
- python 2.6+ and 3.2+ support
- ignores syntax errors and wrong indentation
- can deal with complex module / function / class structures
- virtualenv support

@@ -1,10 +1,7 @@
"""
To ensure compatibility from Python ``2.5`` - ``3.2``, a module has been
To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
created. Clearly there is huge need to use conforming syntax. But many changes
(e.g. ``property``, ``hasattr`` in ``2.5``) can be rewritten in pure python.

Most of the code here is necessary to support Python 2.5. Once this dependency
will be dropped, we'll get rid of most code.
"""
import sys
import imp
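
The whole module leans on one idiom that also shows up repeatedly in the hunks below: try the modern name first and fall back for older interpreters. A minimal standalone sketch of that idiom, assembled from the fallbacks visible elsewhere in this diff (it is not meant as Jedi's exact code)::

    import sys

    # Prefer the stdlib implementation, fall back for older interpreters.
    try:
        from ast import literal_eval       # available since Python 2.6
    except ImportError:
        literal_eval = eval                # weaker fallback, as the old shim used

    try:
        import json                        # stdlib since Python 2.6
    except ImportError:
        import simplejson as json          # external backport, as in the old shim

    is_py3k = sys.hexversion >= 0x03000000  # same version check the module uses
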
@@ -16,7 +13,7 @@ except:

is_py3k = sys.hexversion >= 0x03000000
is_py33 = sys.hexversion >= 0x03030000
is_py25 = sys.hexversion < 0x02060000


def find_module_py33(string, path=None):
    mod_info = (None, None, None)
@@ -48,6 +45,7 @@ def find_module_py33(string, path=None):

    return mod_info


def find_module_pre_py33(string, path=None):
    mod_info = None
    if path is None:
@@ -57,6 +55,7 @@ def find_module_pre_py33(string, path=None):

    return (mod_info[0], mod_info[1], mod_info[2][2] == imp.PKG_DIRECTORY)


def find_module(string, path=None):
    """Provides information about a module.

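
``find_module_pre_py33`` wraps the legacy ``imp`` API (deprecated later and removed in Python 3.12): ``imp.find_module`` returns a ``(file, pathname, description)`` triple, and the third element of ``description`` is a type constant such as ``imp.PKG_DIRECTORY``. A small sketch of that return shape; the module name is chosen arbitrarily and none of this is Jedi code::

    import imp

    # Locate the stdlib 'os' module the pre-3.3 way.
    file_obj, pathname, description = imp.find_module('os')
    suffix, mode, module_type = description

    # True only for package directories (e.g. 'json'), False for plain modules.
    is_package = module_type == imp.PKG_DIRECTORY
    print(pathname, is_package)

    if file_obj is not None:
        file_obj.close()  # imp leaves the file open; close it ourselves
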
@@ -88,34 +87,6 @@ except NameError:
            else:
                return default

# ast module was defined in python 2.6
try:
    from ast import literal_eval
except ImportError:
    literal_eval = eval


# properties in 2.5
try:
    property.setter
except AttributeError:
    class property(property):
        def __init__(self, fget, *args, **kwargs):
            self.__doc__ = fget.__doc__
            super(property, self).__init__(fget, *args, **kwargs)

        def setter(self, fset):
            cls_ns = sys._getframe(1).f_locals
            for k, v in cls_ns.iteritems():
                if v == self:
                    propname = k
                    break
            cls_ns[propname] = property(self.fget, fset,
                                        self.fdel, self.__doc__)
            return cls_ns[propname]
else:
    property = property

# unicode function
try:
    unicode = unicode
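
The deleted class above re-implemented ``property.setter`` for Python 2.5, where properties had no ``setter`` attribute. On 2.6+ the native decorator form works out of the box, which is what makes the removal safe. A tiny illustration; the class and attribute names are invented::

    class Circle(object):
        def __init__(self, radius):
            self._radius = radius

        @property
        def radius(self):
            """Radius of the circle."""
            return self._radius

        @radius.setter            # relies on property.setter, new in Python 2.6
        def radius(self, value):
            if value < 0:
                raise ValueError("radius must be non-negative")
            self._radius = value


    c = Circle(2)
    c.radius = 3   # goes through the setter
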
@@ -200,66 +171,10 @@ def use_metaclass(meta, *bases):
    return meta("HackClass", bases, {})

try:
    from inspect import cleandoc
except ImportError:
    # python 2.5 doesn't have this method
    import string

    def cleandoc(doc):
        """Clean up indentation from docstrings.

        Any whitespace that can be uniformly removed from the second line
        onwards is removed."""
        try:
            lines = string.split(string.expandtabs(doc), '\n')
        except UnicodeError:
            return None
        else:
            # Find minimum indentation of any non-blank lines after first line.
            margin = sys.maxint
            for line in lines[1:]:
                content = len(string.lstrip(line))
                if content:
                    indent = len(line) - content
                    margin = min(margin, indent)
            # Remove indentation.
            if lines:
                lines[0] = lines[0].lstrip()
            if margin < sys.maxint:
                for i in range(1, len(lines)):
                    lines[i] = lines[i][margin:]
            # Remove any trailing or leading blank lines.
            while lines and not lines[-1]:
                lines.pop()
            while lines and not lines[0]:
                lines.pop(0)
            return string.join(lines, '\n')

if is_py25:
    # adds the `itertools.chain.from_iterable` constructor
    import itertools

    class chain(itertools.chain):
        @staticmethod
        def from_iterable(iterables):
            # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
            for it in iterables:
                for element in it:
                    yield element
    itertools.chain = chain
    del chain

try:
    from functools import reduce
    from functools import reduce # Python 3
except ImportError:
    reduce = reduce

try:
    import json
except ImportError:
    # python 2.5
    import simplejson as json

try:
    encoding = sys.stdout.encoding
    if encoding is None:

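
Both removed helpers have stdlib equivalents from Python 2.6 onwards, which is the whole point of dropping them: ``itertools.chain.from_iterable`` and ``inspect.cleandoc``. A quick sketch of those equivalents with made-up input data::

    import itertools
    from inspect import cleandoc

    # Flatten one level of nesting, exactly what the removed backport emulated.
    flat = list(itertools.chain.from_iterable(['ABC', 'DEF']))
    assert flat == ['A', 'B', 'C', 'D', 'E', 'F']

    # Strip the common leading indentation from a docstring-like block.
    doc = """First line.
            Indented second line.
            Indented third line."""
    print(cleandoc(doc))
    # First line.
    # Indented second line.
    # Indented third line.
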
@@ -50,7 +50,8 @@ def _clear_caches_after_call(func):


class BaseDefinition(object):
    _mapping = {'posixpath': 'os.path',
    _mapping = {
        'posixpath': 'os.path',
        'riscospath': 'os.path',
        'ntpath': 'os.path',
        'os2emxpath': 'os.path',

@@ -21,6 +21,7 @@ from __future__ import with_statement
import time
import os
import sys
import json
import hashlib
try:
    import cPickle as pickle
@@ -28,7 +29,6 @@ except:
    import pickle
import shutil

from jedi._compatibility import json
from jedi import settings
from jedi import common
from jedi import debug

@@ -46,6 +46,7 @@ def warning(*args):

def speed(name):
    if debug_function and enable_speed:
        global start_time
        now = time.time()
        debug_function(SPEED, 'speed: ' + '%s %s' % (name, now - start_time))

@@ -529,7 +529,7 @@ def check_statement_information(stmt, search_name):
    # isinstance check
    isinst = call.execution.values
    assert len(isinst) == 2 # has two params
    obj, classes = [stmt.get_commands() for stmt in isinst]
    obj, classes = [statement.get_commands() for statement in isinst]
    assert len(obj) == 1
    assert len(classes) == 1
    assert isinstance(obj[0], pr.Call)

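
For context, this helper pattern-matches ``isinstance`` calls in analysed source: exactly two arguments, the object and a class (or tuple of classes). A hypothetical snippet of the kind of user code being inspected; the names are invented and this is not Jedi code::

    def describe(value):
        # An isinstance call with two arguments, the shape the helper asserts on:
        # the object being tested and the class (or tuple of classes).
        if isinstance(value, (int, float)):
            return value + 1
        return str(value)
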
@@ -14,7 +14,7 @@ from __future__ import with_statement
import copy
import itertools

from jedi._compatibility import property, use_metaclass, next, hasattr
from jedi._compatibility import use_metaclass, next, hasattr
from jedi import parsing_representation as pr
from jedi import cache
from jedi import helpers

@@ -5,7 +5,7 @@ finished (and still not working as I want), I won't document it any further.
"""
import re

from jedi._compatibility import use_metaclass, property
from jedi._compatibility import use_metaclass
from jedi import settings
from jedi import parsing
from jedi import parsing_representation as pr

@@ -11,11 +11,7 @@ try:
    from pydoc_data import topics as pydoc_topics
except ImportError:
    # Python 2.6
    try:
        import pydoc_topics
    except ImportError:
        # Python 2.5
        pydoc_topics = None

if is_py3k:
    keys = keyword.kwlist

@@ -19,8 +19,9 @@ import re
import tokenizer as tokenize
import sys
import os
from ast import literal_eval

from jedi._compatibility import exec_function, unicode, is_py25, literal_eval
from jedi._compatibility import exec_function, unicode
from jedi import cache
from jedi import parsing
from jedi import parsing_representation as pr
@@ -383,8 +384,7 @@ def source_to_unicode(source, encoding=None):
        http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
        declarations
        """
        byte_mark = '\xef\xbb\xbf' if is_py25 else \
            literal_eval(r"b'\xef\xbb\xbf'")
        byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'

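
The removed branch existed because Python 2.5 could not even parse a ``b'...'`` literal; building the bytes at runtime through ``literal_eval`` sidesteps that. With 2.5 gone, the single ``literal_eval`` line is enough for all supported versions. A standalone sketch of the same BOM check, with an invented sample input::

    from ast import literal_eval

    # The UTF-8 byte-order mark, built the same way the parser does it.
    byte_mark = literal_eval(r"b'\xef\xbb\xbf'")

    source = b'\xef\xbb\xbfprint("hi")'   # a source file that starts with a BOM
    if source.startswith(byte_mark):
        source = source[len(byte_mark):]  # strip it before decoding as UTF-8
    print(source.decode('utf-8'))
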
@@ -403,10 +403,10 @@ class Parser(object):
        with common.ignored(IndexError, AttributeError):
            # If string literal is being parsed
            first_tok = stmt.token_list[0]
            if (not stmt.set_vars and
                not stmt.used_vars and
                len(stmt.token_list) == 1 and
                first_tok[0] == tokenize.STRING):
            if (not stmt.set_vars
                and not stmt.used_vars
                and len(stmt.token_list) == 1
                and first_tok[0] == tokenize.STRING):
                # ... then set it as a docstring
                self.scope.statements[-1].add_docstr(first_tok[1])

@@ -442,7 +442,7 @@ class Parser(object):
        if self.user_position and (self.start_pos[0] == self.user_position[0]
                                   or self.user_scope is None
                                   and self.start_pos[0] >= self.user_position[0]):
            debug.dbg('user scope found [%s] = %s' % \
            debug.dbg('user scope found [%s] = %s' %
                      (self.parserline.replace('\n', ''), repr(self.scope)))
            self.user_scope = self.scope
        self.last_token = self.current

@@ -38,9 +38,10 @@ from __future__ import with_statement
import os
import re
import tokenizer as tokenize
from inspect import cleandoc
from ast import literal_eval

from jedi._compatibility import next, literal_eval, cleandoc, Python3Method, \
    encoding, property, unicode, is_py3k
from jedi._compatibility import next, Python3Method, encoding, unicode, is_py3k
from jedi import common
from jedi import debug

@@ -1276,7 +1277,8 @@ class Array(Call):
        return zip(self.keys, self.values)

    def get_code(self):
        map = {self.NOARRAY: '(%s)',
        map = {
            self.NOARRAY: '(%s)',
            self.TUPLE: '(%s)',
            self.LIST: '[%s]',
            self.DICT: '{%s}',

@@ -35,7 +35,7 @@ class RecursionDecorator(object):
    def push_stmt(self, stmt):
        self.current = RecursionNode(stmt, self.current)
        check = self._check_recursion()
        if check:# TODO remove False!!!!
        if check: # TODO remove False!!!!
            debug.warning('catched stmt recursion: %s against %s @%s'
                          % (stmt, check.stmt, stmt.start_pos))
            self.pop_stmt()

@@ -26,15 +26,25 @@ ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3


class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

def group(*choices):
    return '(' + '|'.join(choices) + ')'


def any(*choices):
    return group(*choices) + '*'


def maybe(*choices):
    return group(*choices) + '?'


# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
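
These three helpers are the tokenizer's tiny regex-combinator layer: ``group`` builds an alternation, while ``any`` and ``maybe`` append ``*`` and ``?`` quantifiers to it. A small sketch of how they compose; the patterns below are invented, not the tokenizer's real ones::

    import re


    def group(*choices):
        return '(' + '|'.join(choices) + ')'


    def any(*choices):          # shadows the builtin, just as the tokenizer does
        return group(*choices) + '*'


    def maybe(*choices):
        return group(*choices) + '?'


    # '0x' prefix is optional, then one or more hex digits.
    hexnumber = maybe('0[xX]') + group('[0-9a-fA-F]+')
    assert group('abc', 'def') == '(abc|def)'
    assert re.match(hexnumber, '0xff') is not None
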
@@ -91,9 +101,11 @@ ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)


def _compile(expr):
    return re.compile(expr, re.UNICODE)


tokenprog, pseudoprog, single3prog, double3prog = map(
    _compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": _compile(Single), '"': _compile(Double),
@@ -120,14 +132,16 @@ for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ):
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

del _compile

tabsize = 8

class TokenError(Exception): pass

class TokenError(Exception):
    pass


def generate_tokens(readline):
@@ -169,13 +183,14 @@ def generate_tokens(readline):
                continue

        elif parenlev == 0 and not continued: # new statement
            if not line: break
            if not line:
                break
            column = 0
            while pos < max: # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                    column = (column // tabsize + 1) * tabsize
                elif line[pos] == '\f':
                    column = 0
                else:

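
The ``column = (column // tabsize + 1) * tabsize`` line advances the column to the next multiple of ``tabsize`` (8), which is how a tab expands when the tokenizer measures indentation. A minimal sketch of the same arithmetic outside the tokenizer; the helper function is invented for illustration::

    tabsize = 8


    def advance(column, char):
        """Return the new column after reading one whitespace character."""
        if char == ' ':
            return column + 1
        if char == '\t':
            return (column // tabsize + 1) * tabsize  # jump to the next tab stop
        if char == '\f':
            return 0
        return column


    col = 0
    for ch in ' \t ':          # space, tab, space
        col = advance(col, ch)
    assert col == 9            # 0 -> 1 -> 8 (next tab stop) -> 9
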
setup.py (1 line changed)

@@ -32,7 +32,6 @@ setup(name='jedi',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',

@@ -98,14 +98,14 @@ Tests look like this::
"""
import os
import re
from ast import literal_eval

if __name__ == '__main__':
    import sys
    sys.path.insert(0, '..')

import jedi
from jedi._compatibility import unicode, StringIO, reduce, is_py25, \
    literal_eval
from jedi._compatibility import unicode, reduce, StringIO


TEST_COMPLETIONS = 0
@@ -259,11 +259,6 @@ def collect_dir_tests(base_dir, test_files, check_thirdparty=False):
        files_to_execute = [a for a in test_files.items() if a[0] in f_name]
        lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
        if f_name.endswith(".py") and (not test_files or files_to_execute):
            # for python2.5 certain tests are not being done, because it
            # only has these features partially.
            if is_py25 and f_name in ['generators.py', 'types.py']:
                continue

            skip = None
            if check_thirdparty:
                lib = f_name.replace('_.py', '')

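
The ``reduce`` call above flattens the per-file line lists into one list of line numbers. A tiny standalone illustration with made-up data::

    from functools import reduce

    # Each entry pairs a file name with the line numbers selected for it.
    files_to_execute = [('basic.py', [3, 7]), ('imports.py', [12])]

    lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
    assert lines_to_execute == [3, 7, 12]
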
@@ -14,7 +14,7 @@ import textwrap
from .base import TestBase, unittest, cwd_at

import jedi
from jedi._compatibility import is_py25, utf8, unicode
from jedi._compatibility import utf8, unicode
from jedi import api
api_classes = api.api_classes

@@ -86,7 +86,6 @@ class TestRegression(TestBase):
    def test_keyword_doc(self):
        r = list(self.definition("or", (1, 1)))
        assert len(r) == 1
        if not is_py25:
            assert len(r[0].doc) > 100

        r = list(self.definition("asfdasfd", (1, 1)))
@@ -95,7 +94,6 @@ class TestRegression(TestBase):
    def test_operator_doc(self):
        r = list(self.definition("a == b", (1, 3)))
        assert len(r) == 1
        if not is_py25:
            assert len(r[0].doc) > 100

    def test_function_call_signature(self):
@@ -333,8 +331,6 @@ class TestRegression(TestBase):
        assert [d.doc for d in defs]

    def test_goto_following_on_imports(self):
        if is_py25:
            return
        g = self.goto("import multiprocessing.dummy; multiprocessing.dummy")
        assert len(g) == 1
        assert g[0].start_pos != (0, 0)

tox.ini (9 lines changed)

@@ -1,15 +1,10 @@
[tox]
envlist = py25, py26, py27, py32, py33
envlist = py26, py27, py32, py33
[testenv]
deps =
    pytest
    https://bitbucket.org/hpk42/pytest/get/c4f58165e0d4.zip
commands =
    py.test []
[testenv:py25]
deps =
    simplejson
    unittest2
    {[testenv]deps}
[testenv:py26]
deps =
    unittest2