forked from VimPlug/jedi
merged dev and linter
@@ -18,10 +18,13 @@ andviro (@andviro)
 Mike Gilbert (@floppym) <floppym@gentoo.org>
 Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
 Lubos Trilety <ltrilety@redhat.com>
-Akinori Hattori (@hattya)
+Akinori Hattori (@hattya) <hattya@gmail.com>
 srusskih (@srusskih)
 Steven Silvester (@blink1073)
 Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
 Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
 Fredrik Bergroth (@fbergroth)
 Mathias Fußenegger (@mfussenegger)
 Syohei Yoshida (@syohex) <syohex@gmail.com>

 Note: (@user) means a github user name.

@@ -349,7 +349,7 @@ class Script(object):
         Use :attr:`.call_signatures` instead.
         .. todo:: Remove!
         """
-        warnings.warn("Use line instead.", DeprecationWarning)
+        warnings.warn("Use call_signatures instead.", DeprecationWarning)
         sig = self.call_signatures()
         return sig[0] if sig else None

@@ -251,7 +251,7 @@ class BaseDefinition(object):
         Use :meth:`.docstring` instead.
         .. todo:: Remove!
         """
-        warnings.warn("Use documentation() instead.", DeprecationWarning)
+        warnings.warn("Use docstring() instead.", DeprecationWarning)
         return self.docstring()

     @property
@@ -261,7 +261,7 @@ class BaseDefinition(object):
         Use :meth:`.docstring` instead.
         .. todo:: Remove!
         """
-        warnings.warn("Use documentation() instead.", DeprecationWarning)
+        warnings.warn("Use docstring() instead.", DeprecationWarning)
         return self.docstring(raw=True)

     @property
@@ -313,6 +313,7 @@ class BaseDefinition(object):
         stripped = self._definition
         if isinstance(stripped, pr.Name):
             stripped = stripped.parent

+        # We should probably work in `Finder._names_to_types` here.
         if isinstance(stripped, pr.Function):
             stripped = er.Function(self._evaluator, stripped)
@@ -469,7 +470,7 @@ class Completion(BaseDefinition):
         :param fast: Don't follow imports that are only one level deep like
             ``import foo``, but follow ``from foo import bar``. This makes
             sense for speed reasons. Completing `import a` is slow if you use
-            the ``foo.documentation(fast=False)`` on every object, because it
+            the ``foo.docstring(fast=False)`` on every object, because it
             parses all libraries starting with ``a``.
         """
         definition = self._definition

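For reference, the replacement APIs that these deprecation messages point to can be exercised like this — a minimal sketch, assuming the 0.8-era positional ``Script(source, line, column, path)`` signature::

    import jedi

    source = 'import json; json.lo'
    script = jedi.Script(source, 1, len(source), 'example.py')
    completion = script.completions()[0]
    print(completion.docstring())   # supersedes the deprecated documentation()

    source = 'import json; json.loads('
    print(jedi.Script(source, 1, len(source), 'example.py').call_signatures())
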
@@ -15,9 +15,13 @@ Then you will be able to use Jedi completer in your Python interpreter::

     os.path.join().split().index os.path.join().split().insert

 """
 import jedi.utils
+from jedi import __version__ as __jedi_version__

+print('REPL completion using Jedi %s' % __jedi_version__)
 jedi.utils.setup_readline()

 del jedi

+# Note: try not to do many things here, as it will contaminate global
+# namespace of the interpreter.

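This module is meant to be pulled in when the interpreter starts; one way to wire it up (an assumption about the reader's setup, not part of this change) is via ``PYTHONSTARTUP``::

    # ~/.pythonrc.py  (hypothetical file name)
    import jedi.utils
    jedi.utils.setup_readline()
    del jedi

    # then, in the shell:
    # export PYTHONSTARTUP=~/.pythonrc.py
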
@@ -3,6 +3,7 @@ import sys

 from jedi._compatibility import exec_function, unicode
 from jedi.parser import representation as pr
+from jedi.parser import Parser
 from jedi.evaluate.cache import memoize_default
 from jedi import debug
 from jedi import common
@@ -15,19 +16,21 @@ def get_sys_path():
         if not venv:
             return
         venv = os.path.abspath(venv)
-        p = os.path.join(
-            venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
+        if os.name == 'nt':
+            p = os.path.join(venv, 'lib', 'site-packages')
+        else:
+            p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2],
+                             'site-packages')
         if p not in sys_path:
             sys_path.insert(0, p)

     check_virtual_env(sys.path)
     return [p for p in sys.path if p != ""]


-@memoize_default(evaluator_is_first_arg=True)
-def sys_path_with_modifications(evaluator, module):
-    def execute_code(code):
+def _execute_code(module_path, code):
     c = "import os; from os.path import *; result=%s"
-    variables = {'__file__': module.path}
+    variables = {'__file__': module_path}
     try:
         exec_function(c % code, variables)
     except Exception:
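To see what ``_execute_code`` is for: it evaluates a single path expression taken from parsed source inside a tiny namespace where ``__file__`` is bound to the module's path. A standalone sketch of the idea (simplified; the real helper additionally checks that the result is a string before returning an absolute path)::

    def eval_path_expr(module_path, code):
        variables = {'__file__': module_path}
        try:
            exec("import os; from os.path import *; result=%s" % code, variables)
        except Exception:
            return None
        return variables.get('result')

    # A statement like sys.path.append(dirname(__file__) + '/lib')
    # in /proj/app.py yields:
    eval_path_expr('/proj/app.py', "dirname(__file__) + '/lib'")  # '/proj/lib'
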
@@ -42,50 +45,80 @@ def sys_path_with_modifications(evaluator, module):
     except KeyError:
         return None

-    def check_module(module):

+def _paths_from_assignment(statement):
+    """
+    extracts the assigned strings from an assignment that looks as follows::
+
+        >>> sys.path[0:0] = ['module/path', 'another/module/path']
+    """
+
+    names = statement.get_defined_names()
+    if len(names) != 1:
+        return []
+    if [unicode(x) for x in names[0].names] != ['sys', 'path']:
+        return []
+    expressions = statement.expression_list()
+    if len(expressions) != 1 or not isinstance(expressions[0], pr.Array):
+        return
+    stmts = (s for s in expressions[0].values if isinstance(s, pr.Statement))
+    expression_lists = (s.expression_list() for s in stmts)
+    return [e.value for exprs in expression_lists for e in exprs
+            if isinstance(e, pr.Literal) and e.value]
+
+
+def _paths_from_insert(module_path, exe):
+    """ extract the inserted module path from an "sys.path.insert" statement
+    """
+    exe_type, exe.type = exe.type, pr.Array.NOARRAY
+    exe_pop = exe.values.pop(0)
+    res = _execute_code(module_path, exe.get_code())
+    exe.type = exe_type
+    exe.values.insert(0, exe_pop)
+    return res
+
+
+def _paths_from_call_expression(module_path, call):
+    """ extract the path from either "sys.path.append" or "sys.path.insert" """
+    if call.execution is None:
+        return
+    n = call.name
+    if not isinstance(n, pr.Name) or len(n.names) != 3:
+        return
+    names = [unicode(x) for x in n.names]
+    if names[:2] != ['sys', 'path']:
+        return
+    cmd = names[2]
+    exe = call.execution
+    if cmd == 'insert' and len(exe) == 2:
+        path = _paths_from_insert(module_path, exe)
+    elif cmd == 'append' and len(exe) == 1:
+        path = _execute_code(module_path, exe.get_code())
+    return path and [path] or []
+
+
+def _check_module(module):
     try:
         possible_stmts = module.used_names['path']
     except KeyError:
         return get_sys_path()

     sys_path = list(get_sys_path())  # copy
-    for p in possible_stmts:
-        if not isinstance(p, pr.Statement):
-            continue
-        expression_list = p.expression_list()
-        # sys.path command is just one thing.
-        if len(expression_list) != 1 or not isinstance(expression_list[0], pr.Call):
-            continue
-        call = expression_list[0]
-        n = call.name
-        if not isinstance(n, pr.Name) or len(n.names) != 3:
-            continue
-        if [unicode(x) for x in n.names[:2]] != ['sys', 'path']:
-            continue
-        array_cmd = unicode(n.names[2])
-        if call.execution is None:
-            continue
-        exe = call.execution
-        if not (array_cmd == 'insert' and len(exe) == 2
-                or array_cmd == 'append' and len(exe) == 1):
-            continue
-
-        if array_cmd == 'insert':
-            exe_type, exe.type = exe.type, pr.Array.NOARRAY
-            exe_pop = exe.values.pop(0)
-            res = execute_code(exe.get_code())
-            if res is not None:
-                sys_path.insert(0, res)
-                debug.dbg('sys path inserted: %s', res)
-            exe.type = exe_type
-            exe.values.insert(0, exe_pop)
-        elif array_cmd == 'append':
-            res = execute_code(exe.get_code())
-            if res is not None:
-                sys_path.append(res)
-                debug.dbg('sys path added: %s', res)
+    statements = (p for p in possible_stmts if isinstance(p, pr.Statement))
+    for stmt in statements:
+        expressions = stmt.expression_list()
+        if len(expressions) == 1 and isinstance(expressions[0], pr.Call):
+            sys_path.extend(
+                _paths_from_call_expression(module.path, expressions[0]) or [])
+        elif (
+            hasattr(stmt, 'assignment_details') and
+            len(stmt.assignment_details) == 1
+        ):
+            sys_path.extend(_paths_from_assignment(stmt) or [])
     return sys_path
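Taken together, ``_check_module`` now recognizes three shapes of ``sys.path`` manipulation in scanned source — a hedged summary of the code above, not an exhaustive spec::

    import sys
    sys.path.insert(0, '/opt/plugins')    # handled by _paths_from_insert
    sys.path.append('/opt/extra')         # handled via _execute_code
    sys.path[0:0] = ['/opt/vendored']     # handled by _paths_from_assignment
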

+@memoize_default(evaluator_is_first_arg=True)
+def sys_path_with_modifications(evaluator, module):
     if module.path is None:
         # Support for modules without a path is bad, therefore return the
         # normal path.
@@ -95,27 +128,82 @@ def sys_path_with_modifications(evaluator, module):
     with common.ignored(OSError):
         os.chdir(os.path.dirname(module.path))

-    result = check_module(module)
+    result = _check_module(module)
     result += _detect_django_path(module.path)

+    # buildout scripts often contain the same sys.path modifications
+    # the set here is used to avoid duplicate sys.path entries
+    buildout_paths = set()
+    for module_path in _get_buildout_scripts(module.path):
+        try:
+            with open(module_path, 'rb') as f:
+                source = f.read()
+        except IOError:
+            pass
+        else:
+            p = Parser(common.source_to_unicode(source), module_path)
+            for path in _check_module(p.module):
+                if path not in buildout_paths:
+                    buildout_paths.add(path)
+                    result.append(path)
     # cleanup, back to old directory
     os.chdir(curdir)
-    return result
+    return list(result)


+def _traverse_parents(path):
+    while True:
+        new = os.path.dirname(path)
+        if new == path:
+            return
+        path = new
+        yield path
+
+
+def _get_parent_dir_with_file(path, filename):
+    for parent in _traverse_parents(path):
+        if os.path.isfile(os.path.join(parent, filename)):
+            return parent
+    return None
+
+
 def _detect_django_path(module_path):
     """ Detects the path of the very well known Django library (if used) """
     result = []
-    while True:
-        new = os.path.dirname(module_path)
-        # If the module_path doesn't change anymore, we're finished -> /
-        if new == module_path:
-            break
-        else:
-            module_path = new

+    for parent in _traverse_parents(module_path):
         with common.ignored(IOError):
-            with open(module_path + os.path.sep + 'manage.py'):
+            with open(parent + os.path.sep + 'manage.py'):
                 debug.dbg('Found django path: %s', module_path)
-                result.append(module_path)
+                result.append(parent)
     return result


+def _get_buildout_scripts(module_path):
+    """
+    if there is a 'buildout.cfg' file in one of the parent directories of the
+    given module it will return a list of all files in the buildout bin
+    directory that look like python files.
+
+    :param module_path: absolute path to the module.
+    :type module_path: str
+    """
+    project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg')
+    if not project_root:
+        return []
+    bin_path = os.path.join(project_root, 'bin')
+    if not os.path.exists(bin_path):
+        return []
+    extra_module_paths = []
+    for filename in os.listdir(bin_path):
+        try:
+            filepath = os.path.join(bin_path, filename)
+            with open(filepath, 'r') as f:
+                firstline = f.readline()
+                if firstline.startswith('#!') and 'python' in firstline:
+                    extra_module_paths.append(filepath)
+        except IOError as e:
+            # either permission error or race cond. because file got deleted
+            # ignore
+            debug.warning(unicode(e))
+            continue
+    return extra_module_paths

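Usage of the new buildout helpers, under a hypothetical project layout (this mirrors the test fixtures added further down)::

    # /srv/proj/buildout.cfg   exists
    # /srv/proj/bin/app        starts with '#!/usr/bin/python'
    from jedi.evaluate.sys_path import _get_buildout_scripts
    _get_buildout_scripts('/srv/proj/src/pkg/module.py')
    # -> ['/srv/proj/bin/app']
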
@@ -76,11 +76,16 @@ class Parser(object):
             d.parent = self.module

         self.module.end_pos = self._gen.current.end_pos
-        if self._gen.current.type in (tokenize.NEWLINE,):
+        if self._gen.current.type == tokenize.NEWLINE:
             # This case is only relevant with the FastTokenizer, because
-            # otherwise there's always an EndMarker.
+            # otherwise there's always an ENDMARKER.
             # we added a newline before, so we need to "remove" it again.
-            self.module.end_pos = self._gen.tokenizer_previous.end_pos
+            #
+            # NOTE: It should be keep end_pos as-is if the last token of
+            # a source is a NEWLINE, otherwise the newline at the end of
+            # a source is not included in a ParserNode.code.
+            if self._gen.previous.type != tokenize.NEWLINE:
+                self.module.end_pos = self._gen.previous.end_pos

         del self._gen

@@ -623,17 +628,10 @@ class PushBackTokenizer(object):
         if self._push_backs:
             return self._push_backs.pop(0)

-        self.previous = self.current
+        previous = self.current
         self.current = next(self._tokenizer)
+        self.previous = previous
         return self.current

     def __iter__(self):
         return self
-
-    @property
-    def tokenizer_previous(self):
-        """
-        Temporary hack, basically returns the last previous if the fast parser
-        sees an EndMarker. The fast parser positions have to be changed anyway.
-        """
-        return self._tokenizer.previous

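The temporary in ``next()`` looks redundant, but it changes failure behavior: ``self.previous`` is only updated when ``next(self._tokenizer)`` succeeds, so an exhausted tokenizer raising ``StopIteration`` no longer clobbers it. The pattern in isolation::

    def advance(self):
        previous = self.current
        self.current = next(self._tokenizer)  # may raise StopIteration
        self.previous = previous              # only reached on success
        return self.current
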
@@ -183,6 +183,9 @@ class ParserNode(object):


 class FastParser(use_metaclass(CachedFastParser)):

+    _keyword_re = re.compile('^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS))
+
     def __init__(self, code, module_path=None):
         # set values like `pr.Module`.
         self.module_path = module_path
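Hoisting the pattern to a class attribute means ``re.compile`` runs once at class-creation time instead of ``re.match`` recompiling (or at least re-looking-up) the pattern on every line. The general shape, in a self-contained example::

    import re

    class Scanner(object):
        # compiled once, shared by every instance
        _keyword_re = re.compile(r'^[ \t]*(def|class|@)')

        def is_block_start(self, line):
            return self._keyword_re.match(line) is not None
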
@@ -196,7 +199,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             self._parse(code)
         except:
             # FastParser is cached, be careful with exceptions
-            self.parsers[:] = []
+            del self.parsers[:]
             raise

     def update(self, code):
@@ -206,7 +209,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             self._parse(code)
         except:
             # FastParser is cached, be careful with exceptions
-            self.parsers[:] = []
+            del self.parsers[:]
             raise

     def _split_parts(self, code):
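``del self.parsers[:]`` and ``self.parsers[:] = []`` are equivalent in effect: both clear the list in place, which matters when other objects hold a reference to it::

    lst = alias = [1, 2, 3]
    del lst[:]            # same effect as lst[:] = []
    assert alias == []    # in-place: aliases see the change, unlike lst = []
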
@@ -215,34 +218,26 @@ class FastParser(use_metaclass(CachedFastParser)):
         each part seperately and therefore cache parts of the file and not
         everything.
         """
-        def add_part():
-            txt = '\n'.join(current_lines)
-            if txt:
-                if add_to_last and parts:
-                    parts[-1] += '\n' + txt
-                else:
-                    parts.append(txt)
-            current_lines[:] = []
-
-        r_keyword = '^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS)
+        def gen_part():
+            text = '\n'.join(current_lines)
+            del current_lines[:]
+            return text

         # Split only new lines. Distinction between \r\n is the tokenizer's
         # job.
         self._lines = code.split('\n')
         current_lines = []
-        parts = []
         is_decorator = False
         current_indent = 0
         old_indent = 0
         new_indent = False
         in_flow = False
-        add_to_last = False
         # All things within flows are simply being ignored.
-        for i, l in enumerate(self._lines):
+        for l in self._lines:
             # check for dedents
-            m = re.match('^([\t ]*)(.?)', l)
-            indent = len(m.group(1))
-            if m.group(2) in ['', '#']:
+            s = l.lstrip('\t ')
+            indent = len(l) - len(s)
+            if not s or s[0] in ('#', '\r'):
                 current_lines.append(l)  # just ignore comments and blank lines
                 continue

@@ -250,8 +245,8 @@ class FastParser(use_metaclass(CachedFastParser)):
                     current_indent = indent
                     new_indent = False
                 if not in_flow or indent < old_indent:
-                    add_part()
-                    add_to_last = False
+                    if current_lines:
+                        yield gen_part()
                     in_flow = False
             elif new_indent:
                 current_indent = indent
@@ -259,12 +254,12 @@ class FastParser(use_metaclass(CachedFastParser)):

             # Check lines for functions/classes and split the code there.
             if not in_flow:
-                m = re.match(r_keyword, l)
+                m = self._keyword_re.match(l)
                 if m:
                     in_flow = m.group(1) in tokenize.FLOWS
                     if not is_decorator and not in_flow:
-                        add_part()
-                        add_to_last = False
+                        if current_lines:
+                            yield gen_part()
                     is_decorator = '@' == m.group(1)
                     if not is_decorator:
                         old_indent = current_indent
@@ -272,12 +267,10 @@ class FastParser(use_metaclass(CachedFastParser)):
                         new_indent = True
             elif is_decorator:
                 is_decorator = False
-                add_to_last = True

             current_lines.append(l)
-        add_part()
-
-        return parts
+        if current_lines:
+            yield gen_part()

     def _parse(self, code):
         """ :type code: str """
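``_split_parts`` thus turns from a list builder into a generator: instead of an ``add_part()`` that appends to a ``parts`` list returned at the end, split points simply ``yield``. The shape of the refactor, reduced to a toy splitter::

    def split_list(lines):
        parts, current = [], []
        for line in lines:
            if line.startswith('def ') and current:
                parts.append('\n'.join(current))
                current = []
            current.append(line)
        if current:
            parts.append('\n'.join(current))
        return parts

    def split_gen(lines):               # same logic, but lazy
        current = []
        for line in lines:
            if line.startswith('def ') and current:
                yield '\n'.join(current)
                current = []
            current.append(line)
        if current:
            yield '\n'.join(current)
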
@@ -285,24 +278,20 @@ class FastParser(use_metaclass(CachedFastParser)):
             new, temp = self._get_parser(unicode(''), unicode(''), 0, [], False)
             return new

-        parts = self._split_parts(code)
-        self.parsers[:] = []
+        del self.parsers[:]

         line_offset = 0
         start = 0
         p = None
         is_first = True

-        for code_part in parts:
-            lines = code_part.count('\n') + 1
+        for code_part in self._split_parts(code):
             if is_first or line_offset >= p.module.end_pos[0]:
-                indent = len(re.match(r'[ \t]*', code_part).group(0))
+                indent = len(code_part) - len(code_part.lstrip('\t '))
                 if is_first and self.current_node is not None:
                     nodes = [self.current_node]
                 else:
                     nodes = []
                 if self.current_node is not None:

                     self.current_node = \
                         self.current_node.parent_until_indent(indent)
                     nodes += self.current_node.old_children
@@ -347,7 +336,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             #else:
                 #print '#'*45, line_offset, p.module.end_pos, 'theheck\n', repr(code_part)

-            line_offset += lines
+            line_offset += code_part.count('\n') + 1
             start += len(code_part) + 1  # +1 for newline

         if self.parsers:
@@ -358,29 +347,26 @@ class FastParser(use_metaclass(CachedFastParser)):
             self.module.end_pos = self.parsers[-1].module.end_pos

         # print(self.parsers[0].module.get_code())
         del code

     def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):
         h = hash(code)
-        hashes = [n.hash for n in nodes]
         node = None
-        try:
-            index = hashes.index(h)
-            if nodes[index].code != code:
-                raise ValueError()
-        except ValueError:
+        for index, node in enumerate(nodes):
+            if node.hash != h or node.code != code:
+                continue
+
+            if node != self.current_node:
+                offset = int(nodes[0] == self.current_node)
+                self.current_node.old_children.pop(index - offset)
+            p = node.parser
+            m = p.module
+            m.line_offset += line_offset + 1 - m.start_pos[0]
+            break
+        else:
             tokenizer = FastTokenizer(parser_code, line_offset)
             p = Parser(parser_code, self.module_path, tokenizer=tokenizer,
                        top_module=self.module, no_docstr=no_docstr)
             p.module.parent = self.module
-        else:
-            if nodes[index] != self.current_node:
-                offset = int(nodes[0] == self.current_node)
-                self.current_node.old_children.pop(index - offset)
-            node = nodes.pop(index)
-            p = node.parser
-            m = p.module
-            m.line_offset += line_offset + 1 - m.start_pos[0]
+            node = None

         return p, node

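The rewrite leans on Python's ``for``/``else``: the ``else`` suite runs only when the loop finishes without ``break``, which maps naturally onto "reuse a cached parser, otherwise build a fresh one". In isolation (``build_parser`` is a hypothetical stand-in)::

    for node in nodes:
        if node.hash == h and node.code == code:
            p = node.parser        # cache hit
            break
    else:
        p = build_parser()         # no break happened: cache miss
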
setup.py
@@ -26,7 +26,7 @@ setup(name='jedi',
       keywords='python completion refactoring vim',
       long_description=readme,
       packages=['jedi', 'jedi.parser', 'jedi.evaluate', 'jedi.evaluate.compiled', 'jedi.api'],
-      package_data={'jedi': ['evlaluate/compiled/fake/*.pym']},
+      package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
       platforms=['any'],
       classifiers=[
           'Development Status :: 4 - Beta',

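The corrected glob matters because ``package_data`` paths are resolved relative to the package directory; a minimal sketch of the semantics (not jedi's full ``setup.py``)::

    from setuptools import setup

    setup(
        name='jedi',
        packages=['jedi'],
        # ships jedi/evaluate/compiled/fake/*.pym with the package;
        # a misspelled prefix would silently match nothing
        package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
    )
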
@@ -127,6 +127,11 @@ def test_completion_docstring():
     assert c.docstring(raw=True, fast=False) == cleandoc(Script.__doc__)


+def test_completion_params():
+    c = Script('import string; string.capwords').completions()[0]
+    assert [p.name for p in c.params] == ['s', 'sep']
+
+
 def test_signature_params():
     def check(defs):
         params = defs[0].params

test/test_evaluate/buildout_project/bin/app (new file)
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+
+import sys
+sys.path[0:0] = [
+  '/usr/lib/python3.4/site-packages',
+  '/tmp/.buildout/eggs/important_package.egg'
+  ]
+
+import important_package
+
+if __name__ == '__main__':
+    sys.exit(important_package.main())
test/test_evaluate/buildout_project/bin/empty_file (new, empty file)
test/test_evaluate/buildout_project/buildout.cfg (new, empty file)
test/test_evaluate/test_buildout_detection.py (new file)
@@ -0,0 +1,71 @@
+import os
+from ..helpers import cwd_at
+from jedi._compatibility import u
+from jedi.parser import Parser
+from jedi.evaluate.sys_path import (
+    _get_parent_dir_with_file,
+    _get_buildout_scripts,
+    _check_module
+)
+
+
+@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
+def test_parent_dir_with_file():
+    parent = _get_parent_dir_with_file(
+        os.path.abspath(os.curdir), 'buildout.cfg')
+    assert parent is not None
+    assert parent.endswith('test/test_evaluate/buildout_project')
+
+
+@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
+def test_buildout_detection():
+    scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
+    assert len(scripts) == 1
+    curdir = os.path.abspath(os.curdir)
+    appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
+    assert scripts[0] == appdir_path
+
+
+def test_append_on_non_sys_path():
+    SRC = u("""
+class Dummy(object):
+    path = []
+
+d = Dummy()
+d.path.append('foo')""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert len(paths) > 0
+    assert 'foo' not in paths
+
+
+def test_path_from_invalid_sys_path_assignment():
+    SRC = u("""
+import sys
+sys.path = 'invalid'""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert len(paths) > 0
+    assert 'invalid' not in paths
+
+
+def test_path_from_sys_path_assignment():
+    SRC = u("""
+#!/usr/bin/python
+
+import sys
+sys.path[0:0] = [
+  '/usr/lib/python3.4/site-packages',
+  '/home/test/.buildout/eggs/important_package.egg'
+  ]
+
+path[0:0] = [1]
+
+import important_package
+
+if __name__ == '__main__':
+    sys.exit(important_package.main())""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert 1 not in paths
+    assert '/home/test/.buildout/eggs/important_package.egg' in paths
@@ -57,7 +57,7 @@ class TestRegression(TestCase):

         self.assertRaises(jedi.NotFoundError, get_def, cls)

-    @pytest.mark.skip('Skip for now, test case is not really supported.')
+    @pytest.mark.skipif('True', reason='Skip for now, test case is not really supported.')
     @cwd_at('jedi')
     def test_add_dynamic_mods(self):
         fname = '__main__.py'