Mirror of https://github.com/davidhalter/jedi.git
Synced 2025-12-08 23:04:48 +08:00

Commit: merge with dev branch
@@ -75,6 +75,9 @@ Jedi really understands your Python code. For a comprehensive list what Jedi can
 do, see: https://jedi.readthedocs.org/en/latest/docs/features.html. A list of
 caveats can be found on the same page.
 
+You can run Jedi on cPython 2.6, 2.7, 3.2 or 3.3, but it should also
+understand/parse code older than those versions.
+
 Tips on how to use Jedi efficiently can be found here:
 https://jedi.readthedocs.org/en/latest/docs/recipes.html
 
@@ -102,14 +105,12 @@ The test suite depends on ``tox`` and ``pytest``::
 
 To run the tests for all supported Python versions::
 
-    PIP_INSECURE=t tox
+    tox
 
 If you want to test only a specific Python version (e.g. Python 2.7), it's as
 easy as ::
 
     tox -e py27
 
-The ``PIP_INSECURE=t`` env variable is only needed for the ``py25`` target.
-
 Tests are also run automatically on `Travis CI
 <https://travis-ci.org/davidhalter/jedi/>`_.
@@ -9,7 +9,7 @@ Features and Caveats
 General Features
 ----------------
 
-- python 2.5+ and 3.2+ support
+- python 2.6+ and 3.2+ support
 - ignores syntax errors and wrong indentation
 - can deal with complex module / function / class structures
 - virtualenv support
@@ -1,10 +1,7 @@
 """
-To ensure compatibility from Python ``2.5`` - ``3.2``, a module has been
+To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
 created. Clearly there is huge need to use conforming syntax. But many changes
 (e.g. ``property``, ``hasattr`` in ``2.5``) can be rewritten in pure python.
-
-Most of the code here is necessary to support Python 2.5. Once this dependency
-will be dropped, we'll get rid of most code.
 """
 import sys
 import imp
@@ -16,7 +13,7 @@ except:
 
 is_py3k = sys.hexversion >= 0x03000000
 is_py33 = sys.hexversion >= 0x03030000
-is_py25 = sys.hexversion < 0x02060000
+
 
 def find_module_py33(string, path=None):
     mod_info = (None, None, None)
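A note on the ``sys.hexversion`` checks above: the value packs the interpreter version into a single integer, so one numeric comparison expresses "at least version X". A minimal sketch of how the packing works (the ``hex_to_version`` helper is illustrative, not part of jedi)::

    import sys

    # 0xAABBCCDD: AA = major, BB = minor, CC = micro, DD = release level/serial.
    # 3.3.0 final is 0x03030000, so `>= 0x03030000` means "Python 3.3 or newer".
    is_py33 = sys.hexversion >= 0x03030000

    def hex_to_version(hexversion):
        # Illustrative: unpack the integer back into (major, minor, micro).
        return ((hexversion >> 24) & 0xff,
                (hexversion >> 16) & 0xff,
                (hexversion >> 8) & 0xff)

    print(hex_to_version(0x03030000))  # (3, 3, 0)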
@@ -48,6 +45,7 @@ def find_module_py33(string, path=None):
 
     return mod_info
 
+
 def find_module_pre_py33(string, path=None):
     mod_info = None
     if path is None:
@@ -57,6 +55,7 @@ def find_module_pre_py33(string, path=None):
 
     return (mod_info[0], mod_info[1], mod_info[2][2] == imp.PKG_DIRECTORY)
 
+
 def find_module(string, path=None):
     """Provides information about a module.
 
@@ -88,34 +87,6 @@ except NameError:
     else:
         return default
 
-# ast module was defined in python 2.6
-try:
-    from ast import literal_eval
-except ImportError:
-    literal_eval = eval
-
-
-# properties in 2.5
-try:
-    property.setter
-except AttributeError:
-    class property(property):
-        def __init__(self, fget, *args, **kwargs):
-            self.__doc__ = fget.__doc__
-            super(property, self).__init__(fget, *args, **kwargs)
-
-        def setter(self, fset):
-            cls_ns = sys._getframe(1).f_locals
-            for k, v in cls_ns.iteritems():
-                if v == self:
-                    propname = k
-                    break
-            cls_ns[propname] = property(self.fget, fset,
-                                        self.fdel, self.__doc__)
-            return cls_ns[propname]
-else:
-    property = property
-
 # unicode function
 try:
     unicode = unicode
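The block removed above is the classic feature-probe idiom: try the modern import, and fall back to a shim when it fails. A minimal sketch of the same idiom outside jedi, mirroring the deleted ``literal_eval`` fallback::

    # Probe for a function that newer interpreters provide (ast.literal_eval
    # appeared in Python 2.6), else fall back to a substitute.
    try:
        from ast import literal_eval
    except ImportError:
        # eval accepts the same literals but is unsafe on untrusted input,
        # which is one reason dropping 2.5 support beats keeping the shim.
        literal_eval = eval

    print(literal_eval('[1, 2, 3]'))  # [1, 2, 3]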
@@ -200,66 +171,10 @@ def use_metaclass(meta, *bases):
     return meta("HackClass", bases, {})
 
 try:
-    from inspect import cleandoc
-except ImportError:
-    # python 2.5 doesn't have this method
-    import string
-
-    def cleandoc(doc):
-        """Clean up indentation from docstrings.
-
-        Any whitespace that can be uniformly removed from the second line
-        onwards is removed."""
-        try:
-            lines = string.split(string.expandtabs(doc), '\n')
-        except UnicodeError:
-            return None
-        else:
-            # Find minimum indentation of any non-blank lines after first line.
-            margin = sys.maxint
-            for line in lines[1:]:
-                content = len(string.lstrip(line))
-                if content:
-                    indent = len(line) - content
-                    margin = min(margin, indent)
-            # Remove indentation.
-            if lines:
-                lines[0] = lines[0].lstrip()
-            if margin < sys.maxint:
-                for i in range(1, len(lines)):
-                    lines[i] = lines[i][margin:]
-            # Remove any trailing or leading blank lines.
-            while lines and not lines[-1]:
-                lines.pop()
-            while lines and not lines[0]:
-                lines.pop(0)
-            return string.join(lines, '\n')
-
-if is_py25:
-    # adds the `itertools.chain.from_iterable` constructor
-    import itertools
-
-    class chain(itertools.chain):
-        @staticmethod
-        def from_iterable(iterables):
-            # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
-            for it in iterables:
-                for element in it:
-                    yield element
-    itertools.chain = chain
-    del chain
-
-try:
-    from functools import reduce
+    from functools import reduce # Python 3
 except ImportError:
     reduce = reduce
 
-try:
-    import json
-except ImportError:
-    # python 2.5
-    import simplejson as json
-
 try:
     encoding = sys.stdout.encoding
     if encoding is None:
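The hand-written ``cleandoc`` that disappears here could go because ``inspect.cleandoc`` exists on every interpreter jedi still targets (it is available since Python 2.6), and the deleted body is essentially a transcription of it. A quick usage sketch::

    from inspect import cleandoc

    doc = '''First line.
        Indented continuation.
        Another line.'''
    # Uniform leading whitespace from the second line onwards is removed.
    print(cleandoc(doc))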
@@ -133,7 +133,7 @@ class Script(object):
         bs = builtin.Builtin.scope
         if isinstance(u, pr.Import):
             if (u.relative_count > 0 or u.from_ns) and not re.search(
                     r'(,|from)\s*$|import\s+$', completion_line):
                 completions += ((k, bs) for k
                                 in keywords.get_keywords('import'))
 
@@ -50,19 +50,20 @@ def _clear_caches_after_call(func):
 
 
 class BaseDefinition(object):
-    _mapping = {'posixpath': 'os.path',
-                'riscospath': 'os.path',
-                'ntpath': 'os.path',
-                'os2emxpath': 'os.path',
-                'macpath': 'os.path',
-                'genericpath': 'os.path',
-                'posix': 'os',
-                '_io': 'io',
-                '_functools': 'functools',
-                '_sqlite3': 'sqlite3',
-                '__builtin__': '',
-                'builtins': '',
-                }
+    _mapping = {
+        'posixpath': 'os.path',
+        'riscospath': 'os.path',
+        'ntpath': 'os.path',
+        'os2emxpath': 'os.path',
+        'macpath': 'os.path',
+        'genericpath': 'os.path',
+        'posix': 'os',
+        '_io': 'io',
+        '_functools': 'functools',
+        '_sqlite3': 'sqlite3',
+        '__builtin__': '',
+        'builtins': '',
+    }
 
     _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
         'argparse._ActionsContainer': 'argparse.ArgumentParser',
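For orientation, ``_mapping`` translates implementation-level module names into the names users actually import, and the empty strings hide the builtins module from displayed paths. A hypothetical lookup in the style the class uses (the helper name is illustrative, not jedi's API)::

    _mapping = {
        'posixpath': 'os.path',
        'ntpath': 'os.path',
        '_functools': 'functools',
        '__builtin__': '',
    }

    def public_module_name(raw_name):
        # Fall back to the raw name when no alias is registered.
        return _mapping.get(raw_name, raw_name)

    print(public_module_name('posixpath'))     # 'os.path'
    print(public_module_name('collections'))   # 'collections'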
@@ -21,6 +21,7 @@ from __future__ import with_statement
 import time
 import os
 import sys
+import json
 import hashlib
 try:
     import cPickle as pickle
@@ -28,7 +29,6 @@ except:
     import pickle
 import shutil
 
-from jedi._compatibility import json
 from jedi import settings
 from jedi import common
 from jedi import debug
@@ -46,6 +46,7 @@ def warning(*args):
 
 def speed(name):
     if debug_function and enable_speed:
+        global start_time
         now = time.time()
         debug_function(SPEED, 'speed: ' + '%s %s' % (name, now - start_time))
 
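The added ``global start_time`` only matters if the function also assigns to the module-level timer; without the declaration such an assignment would create a local. A stripped-down sketch of the pattern (the reset at the end is an assumption, it is not among the lines shown in the hunk)::

    import time

    start_time = time.time()

    def speed(name):
        # Reading a module-level name needs no declaration; assigning to it
        # does, otherwise `start_time = now` would bind a new local instead.
        global start_time
        now = time.time()
        print('speed: %s %s' % (name, now - start_time))
        start_time = now  # assumed reset, outside the lines shown above

    speed('parser')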
@@ -248,7 +248,7 @@ def _scan_statement(stmt, search_name, assignment_details=False):
     check = list(stmt.get_commands())
     if assignment_details:
         for commands, op in stmt.assignment_details:
             check += commands
 
     result = []
     for c in check:
@@ -529,7 +529,7 @@ def check_statement_information(stmt, search_name):
     # isinstance check
     isinst = call.execution.values
     assert len(isinst) == 2  # has two params
-    obj, classes = [stmt.get_commands() for stmt in isinst]
+    obj, classes = [statement.get_commands() for statement in isinst]
     assert len(obj) == 1
     assert len(classes) == 1
     assert isinstance(obj[0], pr.Call)
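The ``stmt`` to ``statement`` rename is more than style: on Python 2 the loop variable of a list comprehension leaks into the enclosing scope, so the old comprehension silently rebound the surrounding function's ``stmt`` argument. A minimal demonstration of the leak::

    def demo(stmt):
        # On Python 2 this comprehension rebinds `stmt` in the function scope,
        # so `stmt` afterwards is 3, not the original argument. Python 3 gives
        # comprehensions their own scope; the rename avoids the clash on 2.
        doubled = [stmt * 2 for stmt in [1, 2, 3]]
        return stmt

    print(demo('original'))  # Python 2 prints 3; Python 3 prints 'original'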
@@ -14,7 +14,7 @@ from __future__ import with_statement
 import copy
 import itertools
 
-from jedi._compatibility import property, use_metaclass, next, hasattr
+from jedi._compatibility import use_metaclass, next, hasattr
 from jedi import parsing_representation as pr
 from jedi import cache
 from jedi import helpers
@@ -5,7 +5,7 @@ finished (and still not working as I want), I won't document it any further.
 """
 import re
 
-from jedi._compatibility import use_metaclass, property
+from jedi._compatibility import use_metaclass
 from jedi import settings
 from jedi import parsing
 from jedi import parsing_representation as pr
@@ -249,7 +249,7 @@ class ImportPath(pr.Base):
                 sys.path = temp
                 raise
             sys.path = temp
 
             return importing
 
         if self.file_path:
@@ -11,11 +11,7 @@ try:
     from pydoc_data import topics as pydoc_topics
 except ImportError:
     # Python 2.6
-    try:
-        import pydoc_topics
-    except ImportError:
-        # Python 2.5
-        pydoc_topics = None
+    import pydoc_topics
 
 if is_py3k:
     keys = keyword.kwlist
@@ -19,8 +19,9 @@ import re
 import tokenizer as tokenize
 import sys
 import os
+from ast import literal_eval
 
-from jedi._compatibility import exec_function, unicode, is_py25, literal_eval
+from jedi._compatibility import exec_function, unicode
 from jedi import cache
 from jedi import parsing
 from jedi import parsing_representation as pr
@@ -383,8 +384,7 @@ def source_to_unicode(source, encoding=None):
     http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
     declarations
     """
-    byte_mark = '\xef\xbb\xbf' if is_py25 else \
-                literal_eval(r"b'\xef\xbb\xbf'")
+    byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
    if source.startswith(byte_mark):
         # UTF-8 byte-order mark
         return 'utf-8'
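The ``literal_eval(r"b'\xef\xbb\xbf'")`` idiom keeps a bytes literal out of the module's own syntax: the ``b''`` prefix only parses on Python 2.6+ and 3.x, so evaluating it from a raw string at runtime let the file itself still compile on 2.5. With 2.5 dropped, the conditional collapses to the single call. A sketch of the resulting BOM check::

    from ast import literal_eval

    # Evaluated at runtime, so the enclosing module never needs b'' syntax.
    byte_mark = literal_eval(r"b'\xef\xbb\xbf'")

    source = b'\xef\xbb\xbfprint("hi")'
    if source.startswith(byte_mark):
        source = source[len(byte_mark):]  # strip the UTF-8 byte-order mark
    print(source)  # b'print("hi")'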
@@ -403,10 +403,10 @@ class Parser(object):
             with common.ignored(IndexError, AttributeError):
                 # If string literal is being parsed
                 first_tok = stmt.token_list[0]
-                if (not stmt.set_vars and
-                        not stmt.used_vars and
-                        len(stmt.token_list) == 1 and
-                        first_tok[0] == tokenize.STRING):
+                if (not stmt.set_vars
+                        and not stmt.used_vars
+                        and len(stmt.token_list) == 1
+                        and first_tok[0] == tokenize.STRING):
                     # ... then set it as a docstring
                     self.scope.statements[-1].add_docstr(first_tok[1])
 
@@ -442,7 +442,7 @@ class Parser(object):
         if self.user_position and (self.start_pos[0] == self.user_position[0]
                                    or self.user_scope is None
                                    and self.start_pos[0] >= self.user_position[0]):
-            debug.dbg('user scope found [%s] = %s' % \
+            debug.dbg('user scope found [%s] = %s' %
                       (self.parserline.replace('\n', ''), repr(self.scope)))
             self.user_scope = self.scope
         self.last_token = self.current
@@ -38,9 +38,10 @@ from __future__ import with_statement
 import os
 import re
 import tokenizer as tokenize
+from inspect import cleandoc
+from ast import literal_eval
 
-from jedi._compatibility import next, literal_eval, cleandoc, Python3Method, \
-    encoding, property, unicode, is_py3k
+from jedi._compatibility import next, Python3Method, encoding, unicode, is_py3k
 from jedi import common
 from jedi import debug
 
@@ -1276,12 +1277,13 @@ class Array(Call):
         return zip(self.keys, self.values)
 
     def get_code(self):
-        map = {self.NOARRAY: '(%s)',
-               self.TUPLE: '(%s)',
-               self.LIST: '[%s]',
-               self.DICT: '{%s}',
-               self.SET: '{%s}'
-               }
+        map = {
+            self.NOARRAY: '(%s)',
+            self.TUPLE: '(%s)',
+            self.LIST: '[%s]',
+            self.DICT: '{%s}',
+            self.SET: '{%s}'
+        }
         inner = []
         for i, stmt in enumerate(self.values):
             s = ''
@@ -35,7 +35,7 @@ class RecursionDecorator(object):
     def push_stmt(self, stmt):
         self.current = RecursionNode(stmt, self.current)
         check = self._check_recursion()
-        if check:# TODO remove False!!!!
+        if check: # TODO remove False!!!!
             debug.warning('catched stmt recursion: %s against %s @%s'
                           % (stmt, check.stmt, stmt.start_pos))
             self.pop_stmt()
@@ -26,15 +26,25 @@ ENCODING = N_TOKENS + 2
 tok_name[ENCODING] = 'ENCODING'
 N_TOKENS += 3
 
 
 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
     def __repr__(self):
         annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
         return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                 self._replace(type=annotated_type))
 
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
+
+def group(*choices):
+    return '(' + '|'.join(choices) + ')'
+
+
+def any(*choices):
+    return group(*choices) + '*'
+
+
+def maybe(*choices):
+    return group(*choices) + '?'
+
+
 # Note: we use unicode matching for names ("\w") but ascii matching for
 # number literals.
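``group``, ``any`` and ``maybe`` are the tiny regex combinators that CPython's own ``tokenize`` module uses to assemble its token patterns; the reformat above just expands them to one statement per line. A small composition sketch::

    import re

    def group(*choices):
        return '(' + '|'.join(choices) + ')'

    def maybe(*choices):
        return group(*choices) + '?'

    # A toy number pattern: optional sign, digits, optional decimal part.
    number = maybe('[-+]') + r'\d+' + maybe(r'\.\d+')
    print(re.match(number, '-3.14').group(0))  # -3.14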
@@ -91,9 +101,11 @@ ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
+
 def _compile(expr):
     return re.compile(expr, re.UNICODE)
 
+
 tokenprog, pseudoprog, single3prog, double3prog = map(
     _compile, (Token, PseudoToken, Single3, Double3))
 endprogs = {"'": _compile(Single), '"': _compile(Double),
@@ -120,14 +132,16 @@ for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
           "b'", 'b"', "B'", 'B"',
           "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"' ):
+          "bR'", 'bR"', "BR'", 'BR"'):
     single_quoted[t] = t
 
 del _compile
 
 tabsize = 8
 
-class TokenError(Exception): pass
+
+class TokenError(Exception):
+    pass
 
 
 def generate_tokens(readline):
@@ -169,13 +183,14 @@ def generate_tokens(readline):
                 continue
 
         elif parenlev == 0 and not continued:  # new statement
-            if not line: break
+            if not line:
+                break
             column = 0
             while pos < max:  # measure leading whitespace
                 if line[pos] == ' ':
                     column += 1
                 elif line[pos] == '\t':
-                    column = (column//tabsize + 1)*tabsize
+                    column = (column // tabsize + 1) * tabsize
                 elif line[pos] == '\f':
                     column = 0
                 else:
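The spaced-out line is the standard tab-stop calculation: a tab advances the column to the next multiple of ``tabsize``. A worked check that the reformat preserves the arithmetic::

    tabsize = 8

    def next_column(column, char):
        # Mirrors the tokenizer's leading-whitespace measurement per char.
        if char == ' ':
            return column + 1
        elif char == '\t':
            return (column // tabsize + 1) * tabsize
        elif char == '\f':
            return 0
        return column

    print(next_column(3, '\t'))   # 8  (column 3 jumps to the next tab stop)
    print(next_column(8, '\t'))   # 16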
@@ -218,7 +233,7 @@ def generate_tokens(readline):
             token, initial = line[start:end], line[start]
 
             if (initial in numchars or  # ordinary number
                     (initial == '.' and token != '.' and token != '...')):
                 yield TokenInfo(NUMBER, token, spos, epos, line)
             elif initial in '\r\n':
                 yield TokenInfo(NL if parenlev > 0 else NEWLINE,
@@ -239,8 +254,8 @@ def generate_tokens(readline):
                     contline = line
                     break
                 elif initial in single_quoted or \
                         token[:2] in single_quoted or \
                         token[:3] in single_quoted:
                     if token[-1] == '\n':  # continued string
                         strstart = (lnum, start)
                         endprog = (endprogs[initial] or endprogs[token[1]] or
setup.py
@@ -32,7 +32,6 @@ setup(name='jedi',
         'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
         'Operating System :: OS Independent',
         'Programming Language :: Python :: 2',
-        'Programming Language :: Python :: 2.5',
         'Programming Language :: Python :: 2.6',
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
@@ -98,14 +98,14 @@ Tests look like this::
 """
 import os
 import re
+from ast import literal_eval
 
 if __name__ == '__main__':
     import sys
     sys.path.insert(0, '..')
 
 import jedi
-from jedi._compatibility import unicode, StringIO, reduce, is_py25, \
-    literal_eval
+from jedi._compatibility import unicode, reduce, StringIO
 
 
 TEST_COMPLETIONS = 0
@@ -259,11 +259,6 @@ def collect_dir_tests(base_dir, test_files, check_thirdparty=False):
         files_to_execute = [a for a in test_files.items() if a[0] in f_name]
         lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, [])
         if f_name.endswith(".py") and (not test_files or files_to_execute):
-            # for python2.5 certain tests are not being done, because it
-            # only has these features partially.
-            if is_py25 and f_name in ['generators.py', 'types.py']:
-                continue
-
             skip = None
             if check_thirdparty:
                 lib = f_name.replace('_.py', '')
@@ -14,7 +14,7 @@ import textwrap
 from .base import TestBase, unittest, cwd_at
 
 import jedi
-from jedi._compatibility import is_py25, utf8, unicode
+from jedi._compatibility import utf8, unicode
 from jedi import api
 api_classes = api.api_classes
 
@@ -86,8 +86,7 @@ class TestRegression(TestBase):
     def test_keyword_doc(self):
         r = list(self.definition("or", (1, 1)))
         assert len(r) == 1
-        if not is_py25:
-            assert len(r[0].doc) > 100
+        assert len(r[0].doc) > 100
 
         r = list(self.definition("asfdasfd", (1, 1)))
         assert len(r) == 0
@@ -95,8 +94,7 @@ class TestRegression(TestBase):
     def test_operator_doc(self):
         r = list(self.definition("a == b", (1, 3)))
         assert len(r) == 1
-        if not is_py25:
-            assert len(r[0].doc) > 100
+        assert len(r[0].doc) > 100
 
     def test_function_call_signature(self):
         defs = self.definition("""
@@ -333,8 +331,6 @@ class TestRegression(TestBase):
         assert [d.doc for d in defs]
 
     def test_goto_following_on_imports(self):
-        if is_py25:
-            return
         g = self.goto("import multiprocessing.dummy; multiprocessing.dummy")
         assert len(g) == 1
         assert g[0].start_pos != (0, 0)
tox.ini
@@ -1,15 +1,10 @@
 [tox]
-envlist = py25, py26, py27, py32, py33
+envlist = py26, py27, py32, py33
 [testenv]
 deps =
-    pytest
+    https://bitbucket.org/hpk42/pytest/get/c4f58165e0d4.zip
 commands =
     py.test []
-[testenv:py25]
-deps =
-    simplejson
-    unittest2
-    {[testenv]deps}
 [testenv:py26]
 deps =
     unittest2