forked from VimPlug/jedi

merged dev and linter

This commit is contained in:
Dave Halter
2014-07-17 23:17:45 +02:00
16 changed files with 321 additions and 153 deletions

View File

@@ -18,10 +18,13 @@ andviro (@andviro)
 Mike Gilbert (@floppym) <floppym@gentoo.org>
 Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
 Lubos Trilety <ltrilety@redhat.com>
-Akinori Hattori (@hattya)
+Akinori Hattori (@hattya) <hattya@gmail.com>
 srusskih (@srusskih)
 Steven Silvester (@blink1073)
 Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
 Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
+Fredrik Bergroth (@fbergroth)
+Mathias Fußenegger (@mfussenegger)
+Syohei Yoshida (@syohex) <syohex@gmail.com>

 Note: (@user) means a github user name.

View File

@@ -349,7 +349,7 @@ class Script(object):
         Use :attr:`.call_signatures` instead.

         .. todo:: Remove!
         """
-        warnings.warn("Use line instead.", DeprecationWarning)
+        warnings.warn("Use call_signatures instead.", DeprecationWarning)
         sig = self.call_signatures()
         return sig[0] if sig else None

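This hunk, like the two in the next file, only changes the text passed to DeprecationWarning so the warning names the method that actually replaces the deprecated one. A minimal sketch of the shim pattern being corrected (the class and method names below are illustrative stand-ins, not jedi's real API):

    import warnings


    class Definition(object):
        """Illustrative stand-in, not jedi's actual class."""

        def docstring(self):
            return "real docs"

        def documentation(self):
            # Deprecated shim: the warning text must name the method that
            # actually replaces it, which is exactly what this commit fixes.
            warnings.warn("Use docstring() instead.", DeprecationWarning)
            return self.docstring()


    warnings.simplefilter('always', DeprecationWarning)
    Definition().documentation()  # emits: DeprecationWarning: Use docstring() instead.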
View File

@@ -251,7 +251,7 @@ class BaseDefinition(object):
         Use :meth:`.docstring` instead.

         .. todo:: Remove!
         """
-        warnings.warn("Use documentation() instead.", DeprecationWarning)
+        warnings.warn("Use docstring() instead.", DeprecationWarning)
         return self.docstring()

     @property
@@ -261,7 +261,7 @@ class BaseDefinition(object):
         Use :meth:`.docstring` instead.

         .. todo:: Remove!
         """
-        warnings.warn("Use documentation() instead.", DeprecationWarning)
+        warnings.warn("Use docstring() instead.", DeprecationWarning)
         return self.docstring(raw=True)

     @property
@@ -313,11 +313,12 @@ class BaseDefinition(object):
         stripped = self._definition
         if isinstance(stripped, pr.Name):
             stripped = stripped.parent
+
         # We should probably work in `Finder._names_to_types` here.
         if isinstance(stripped, pr.Function):
             stripped = er.Function(self._evaluator, stripped)
         elif isinstance(stripped, pr.Class):
             stripped = er.Class(self._evaluator, stripped)

         if stripped.isinstance(pr.Statement):
             return self._evaluator.eval_statement(stripped)
@@ -469,7 +470,7 @@ class Completion(BaseDefinition):
         :param fast: Don't follow imports that are only one level deep like
             ``import foo``, but follow ``from foo import bar``. This makes
             sense for speed reasons. Completing `import a` is slow if you use
-            the ``foo.documentation(fast=False)`` on every object, because it
+            the ``foo.docstring(fast=False)`` on every object, because it
             parses all libraries starting with ``a``.
         """
         definition = self._definition

View File

@@ -15,9 +15,13 @@ Then you will be able to use Jedi completer in your Python interpreter::
     os.path.join().split().index   os.path.join().split().insert
 """
 import jedi.utils
+from jedi import __version__ as __jedi_version__
+
+print('REPL completion using Jedi %s' % __jedi_version__)
 jedi.utils.setup_readline()

 del jedi
+
 # Note: try not to do many things here, as it will contaminate global
 # namespace of the interpreter.

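The added lines print the Jedi version at interpreter startup, which helps confirm which Jedi a startup script actually picked up. Presumably the module is used by pointing PYTHONSTARTUP at it before launching Python (the path and output below are assumptions for illustration, not part of the commit):

    PYTHONSTARTUP=path/to/jedi/api/replstartup.py python
    REPL completion using Jedi <version>
    >>> import os
    >>> os.path.joi<TAB>    # now completes to os.path.join via readline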
View File

@@ -3,6 +3,7 @@ import sys
 from jedi._compatibility import exec_function, unicode
 from jedi.parser import representation as pr
+from jedi.parser import Parser
 from jedi.evaluate.cache import memoize_default
 from jedi import debug
 from jedi import common
@@ -15,77 +16,109 @@ def get_sys_path():
         if not venv:
             return
         venv = os.path.abspath(venv)
-        p = os.path.join(
-            venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
-        sys_path.insert(0, p)
+        if os.name == 'nt':
+            p = os.path.join(venv, 'lib', 'site-packages')
+        else:
+            p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2],
+                             'site-packages')
+        if p not in sys_path:
+            sys_path.insert(0, p)

     check_virtual_env(sys.path)
     return [p for p in sys.path if p != ""]


+def _execute_code(module_path, code):
+    c = "import os; from os.path import *; result=%s"
+    variables = {'__file__': module_path}
+    try:
+        exec_function(c % code, variables)
+    except Exception:
+        debug.warning('sys.path manipulation detected, but failed to evaluate.')
+        return None
+    try:
+        res = variables['result']
+        if isinstance(res, str):
+            return os.path.abspath(res)
+        else:
+            return None
+    except KeyError:
+        return None
+
+
+def _paths_from_assignment(statement):
+    """
+    extracts the assigned strings from an assignment that looks as follows::
+
+    >>> sys.path[0:0] = ['module/path', 'another/module/path']
+    """
+    names = statement.get_defined_names()
+    if len(names) != 1:
+        return []
+    if [unicode(x) for x in names[0].names] != ['sys', 'path']:
+        return []
+
+    expressions = statement.expression_list()
+    if len(expressions) != 1 or not isinstance(expressions[0], pr.Array):
+        return
+
+    stmts = (s for s in expressions[0].values if isinstance(s, pr.Statement))
+    expression_lists = (s.expression_list() for s in stmts)
+    return [e.value for exprs in expression_lists for e in exprs
+            if isinstance(e, pr.Literal) and e.value]
+
+
+def _paths_from_insert(module_path, exe):
+    """ extract the inserted module path from an "sys.path.insert" statement
+    """
+    exe_type, exe.type = exe.type, pr.Array.NOARRAY
+    exe_pop = exe.values.pop(0)
+    res = _execute_code(module_path, exe.get_code())
+    exe.type = exe_type
+    exe.values.insert(0, exe_pop)
+    return res
+
+
+def _paths_from_call_expression(module_path, call):
+    """ extract the path from either "sys.path.append" or "sys.path.insert" """
+    if call.execution is None:
+        return
+    n = call.name
+    if not isinstance(n, pr.Name) or len(n.names) != 3:
+        return
+    names = [unicode(x) for x in n.names]
+    if names[:2] != ['sys', 'path']:
+        return
+    cmd = names[2]
+    exe = call.execution
+    if cmd == 'insert' and len(exe) == 2:
+        path = _paths_from_insert(module_path, exe)
+    elif cmd == 'append' and len(exe) == 1:
+        path = _execute_code(module_path, exe.get_code())
+    return path and [path] or []
+
+
+def _check_module(module):
+    try:
+        possible_stmts = module.used_names['path']
+    except KeyError:
+        return get_sys_path()
+
+    sys_path = list(get_sys_path())  # copy
+    statements = (p for p in possible_stmts if isinstance(p, pr.Statement))
+    for stmt in statements:
+        expressions = stmt.expression_list()
+        if len(expressions) == 1 and isinstance(expressions[0], pr.Call):
+            sys_path.extend(
+                _paths_from_call_expression(module.path, expressions[0]) or [])
+        elif (
+            hasattr(stmt, 'assignment_details') and
+            len(stmt.assignment_details) == 1
+        ):
+            sys_path.extend(_paths_from_assignment(stmt) or [])
+    return sys_path
+
+
 @memoize_default(evaluator_is_first_arg=True)
 def sys_path_with_modifications(evaluator, module):
-    def execute_code(code):
-        c = "import os; from os.path import *; result=%s"
-        variables = {'__file__': module.path}
-        try:
-            exec_function(c % code, variables)
-        except Exception:
-            debug.warning('sys.path manipulation detected, but failed to evaluate.')
-            return None
-        try:
-            res = variables['result']
-            if isinstance(res, str):
-                return os.path.abspath(res)
-            else:
-                return None
-        except KeyError:
-            return None
-
-    def check_module(module):
-        try:
-            possible_stmts = module.used_names['path']
-        except KeyError:
-            return get_sys_path()
-
-        sys_path = list(get_sys_path())  # copy
-        for p in possible_stmts:
-            if not isinstance(p, pr.Statement):
-                continue
-
-            expression_list = p.expression_list()
-            # sys.path command is just one thing.
-            if len(expression_list) != 1 or not isinstance(expression_list[0], pr.Call):
-                continue
-
-            call = expression_list[0]
-            n = call.name
-            if not isinstance(n, pr.Name) or len(n.names) != 3:
-                continue
-            if [unicode(x) for x in n.names[:2]] != ['sys', 'path']:
-                continue
-            array_cmd = unicode(n.names[2])
-            if call.execution is None:
-                continue
-            exe = call.execution
-            if not (array_cmd == 'insert' and len(exe) == 2
-                    or array_cmd == 'append' and len(exe) == 1):
-                continue
-
-            if array_cmd == 'insert':
-                exe_type, exe.type = exe.type, pr.Array.NOARRAY
-                exe_pop = exe.values.pop(0)
-                res = execute_code(exe.get_code())
-                if res is not None:
-                    sys_path.insert(0, res)
-                    debug.dbg('sys path inserted: %s', res)
-                exe.type = exe_type
-                exe.values.insert(0, exe_pop)
-            elif array_cmd == 'append':
-                res = execute_code(exe.get_code())
-                if res is not None:
-                    sys_path.append(res)
-                    debug.dbg('sys path added: %s', res)
-        return sys_path
-
     if module.path is None:
         # Support for modules without a path is bad, therefore return the
         # normal path.
@@ -95,27 +128,82 @@ def sys_path_with_modifications(evaluator, module):
     with common.ignored(OSError):
         os.chdir(os.path.dirname(module.path))
-    result = check_module(module)
+    result = _check_module(module)
     result += _detect_django_path(module.path)
+
+    # buildout scripts often contain the same sys.path modifications
+    # the set here is used to avoid duplicate sys.path entries
+    buildout_paths = set()
+    for module_path in _get_buildout_scripts(module.path):
+        try:
+            with open(module_path, 'rb') as f:
+                source = f.read()
+        except IOError:
+            pass
+        else:
+            p = Parser(common.source_to_unicode(source), module_path)
+            for path in _check_module(p.module):
+                if path not in buildout_paths:
+                    buildout_paths.add(path)
+                    result.append(path)
+
     # cleanup, back to old directory
     os.chdir(curdir)
-    return result
+    return list(result)
+
+
+def _traverse_parents(path):
+    while True:
+        new = os.path.dirname(path)
+        if new == path:
+            return
+        path = new
+        yield path
+
+
+def _get_parent_dir_with_file(path, filename):
+    for parent in _traverse_parents(path):
+        if os.path.isfile(os.path.join(parent, filename)):
+            return parent
+    return None


 def _detect_django_path(module_path):
     """ Detects the path of the very well known Django library (if used) """
     result = []
-    while True:
-        new = os.path.dirname(module_path)
-        # If the module_path doesn't change anymore, we're finished -> /
-        if new == module_path:
-            break
-        else:
-            module_path = new
-
+    for parent in _traverse_parents(module_path):
         with common.ignored(IOError):
-            with open(module_path + os.path.sep + 'manage.py'):
+            with open(parent + os.path.sep + 'manage.py'):
                 debug.dbg('Found django path: %s', module_path)
-                result.append(module_path)
+                result.append(parent)
     return result
+
+
+def _get_buildout_scripts(module_path):
+    """
+    if there is a 'buildout.cfg' file in one of the parent directories of the
+    given module it will return a list of all files in the buildout bin
+    directory that look like python files.
+
+    :param module_path: absolute path to the module.
+    :type module_path: str
+    """
+    project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg')
+    if not project_root:
+        return []
+    bin_path = os.path.join(project_root, 'bin')
+    if not os.path.exists(bin_path):
+        return []
+    extra_module_paths = []
+    for filename in os.listdir(bin_path):
+        try:
+            filepath = os.path.join(bin_path, filename)
+            with open(filepath, 'r') as f:
+                firstline = f.readline()
+                if firstline.startswith('#!') and 'python' in firstline:
+                    extra_module_paths.append(filepath)
+        except IOError as e:
+            # either permission error or race cond. because file got deleted
+            # ignore
+            debug.warning(unicode(e))
+            continue
+    return extra_module_paths

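The core trick in the new `_execute_code` helper is to evaluate the argument of a detected `sys.path.append(...)` or `sys.path.insert(...)` call in a throwaway namespace where `__file__` points at the module being analyzed, so relative path expressions resolve as they would at runtime. A self-contained sketch of the same idea (simplified and Python-3-only for brevity; jedi's version goes through `exec_function` for 2/3 compatibility):

    import os


    def execute_path_code(module_path, code):
        """Evaluate `code` (e.g. "join(dirname(__file__), 'lib')") and
        return an absolute path, or None if it fails or isn't a str."""
        template = "import os; from os.path import *; result=%s"
        variables = {'__file__': module_path}
        try:
            exec(template % code, variables)
        except Exception:
            return None  # arbitrary user code may raise anything
        res = variables.get('result')
        return os.path.abspath(res) if isinstance(res, str) else None


    print(execute_path_code('/project/src/mod.py', "join(dirname(__file__), 'lib')"))
    # on POSIX -> /project/src/lib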
View File

@@ -76,11 +76,16 @@ class Parser(object):
             d.parent = self.module

         self.module.end_pos = self._gen.current.end_pos
-        if self._gen.current.type in (tokenize.NEWLINE,):
+        if self._gen.current.type == tokenize.NEWLINE:
             # This case is only relevant with the FastTokenizer, because
-            # otherwise there's always an EndMarker.
+            # otherwise there's always an ENDMARKER.
             # we added a newline before, so we need to "remove" it again.
-            self.module.end_pos = self._gen.tokenizer_previous.end_pos
+            #
+            # NOTE: It should be keep end_pos as-is if the last token of
+            # a source is a NEWLINE, otherwise the newline at the end of
+            # a source is not included in a ParserNode.code.
+            if self._gen.previous.type != tokenize.NEWLINE:
+                self.module.end_pos = self._gen.previous.end_pos

         del self._gen
@@ -623,17 +628,10 @@ class PushBackTokenizer(object):
         if self._push_backs:
             return self._push_backs.pop(0)

-        self.previous = self.current
+        previous = self.current
         self.current = next(self._tokenizer)
+        self.previous = previous
         return self.current

     def __iter__(self):
         return self
-
-    @property
-    def tokenizer_previous(self):
-        """
-        Temporary hack, basically returns the last previous if the fast parser
-        sees an EndMarker. The fast parser positions have to be changed anyway.
-        """
-        return self._tokenizer.previous

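The PushBackTokenizer change is subtler than it looks: `next(self._tokenizer)` can raise StopIteration, and with the old assignment order `self.previous` had already been overwritten by the time it did, leaving the previous/current pair inconsistent for the end_pos fixup above. A minimal sketch of the two orderings (illustrative names, not jedi's class):

    # Why the assignment order matters: next() can raise StopIteration,
    # and `previous` must not be clobbered when it does.

    def advance_unsafe(state, it):
        state['previous'] = state['current']  # already overwritten...
        state['current'] = next(it)           # ...even if this raises


    def advance_safe(state, it):
        previous = state['current']
        state['current'] = next(it)           # on StopIteration, state is untouched
        state['previous'] = previous          # committed only on success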
View File

@@ -183,6 +183,9 @@ class ParserNode(object):
 class FastParser(use_metaclass(CachedFastParser)):
+    _keyword_re = re.compile('^[ \t]*(def|class|@|%s)' %
+                             '|'.join(tokenize.FLOWS))
+
     def __init__(self, code, module_path=None):
         # set values like `pr.Module`.
         self.module_path = module_path
@@ -196,7 +199,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             self._parse(code)
         except:
             # FastParser is cached, be careful with exceptions
-            self.parsers[:] = []
+            del self.parsers[:]
             raise

     def update(self, code):
@@ -206,7 +209,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             self._parse(code)
         except:
             # FastParser is cached, be careful with exceptions
-            self.parsers[:] = []
+            del self.parsers[:]
             raise

     def _split_parts(self, code):
@@ -215,34 +218,26 @@ class FastParser(use_metaclass(CachedFastParser)):
         each part seperately and therefore cache parts of the file and not
         everything.
         """
-        def add_part():
-            txt = '\n'.join(current_lines)
-            if txt:
-                if add_to_last and parts:
-                    parts[-1] += '\n' + txt
-                else:
-                    parts.append(txt)
-            current_lines[:] = []
-
-        r_keyword = '^[ \t]*(def|class|@|%s)' % '|'.join(tokenize.FLOWS)
+        def gen_part():
+            text = '\n'.join(current_lines)
+            del current_lines[:]
+            return text

         # Split only new lines. Distinction between \r\n is the tokenizer's
         # job.
         self._lines = code.split('\n')
         current_lines = []
-        parts = []
         is_decorator = False
         current_indent = 0
         old_indent = 0
         new_indent = False
         in_flow = False
-        add_to_last = False

         # All things within flows are simply being ignored.
-        for i, l in enumerate(self._lines):
+        for l in self._lines:
             # check for dedents
-            m = re.match('^([\t ]*)(.?)', l)
-            indent = len(m.group(1))
-            if m.group(2) in ['', '#']:
+            s = l.lstrip('\t ')
+            indent = len(l) - len(s)
+            if not s or s[0] in ('#', '\r'):
                 current_lines.append(l)  # just ignore comments and blank lines
                 continue
@@ -250,8 +245,8 @@ class FastParser(use_metaclass(CachedFastParser)):
                     current_indent = indent
                     new_indent = False
                 if not in_flow or indent < old_indent:
-                    add_part()
-                    add_to_last = False
+                    if current_lines:
+                        yield gen_part()
                     in_flow = False
             elif new_indent:
                 current_indent = indent
@@ -259,12 +254,12 @@ class FastParser(use_metaclass(CachedFastParser)):
             # Check lines for functions/classes and split the code there.
             if not in_flow:
-                m = re.match(r_keyword, l)
+                m = self._keyword_re.match(l)
                 if m:
                     in_flow = m.group(1) in tokenize.FLOWS
                     if not is_decorator and not in_flow:
-                        add_part()
-                        add_to_last = False
+                        if current_lines:
+                            yield gen_part()
                     is_decorator = '@' == m.group(1)
                     if not is_decorator:
                         old_indent = current_indent
@@ -272,12 +267,10 @@ class FastParser(use_metaclass(CachedFastParser)):
                         new_indent = True
             elif is_decorator:
                 is_decorator = False
-                add_to_last = True

             current_lines.append(l)
-        add_part()
-
-        return parts
+        if current_lines:
+            yield gen_part()

     def _parse(self, code):
         """ :type code: str """
@@ -285,24 +278,20 @@ class FastParser(use_metaclass(CachedFastParser)):
             new, temp = self._get_parser(unicode(''), unicode(''), 0, [], False)
             return new

-        parts = self._split_parts(code)
-        self.parsers[:] = []
+        del self.parsers[:]

         line_offset = 0
         start = 0
         p = None
         is_first = True
-
-        for code_part in parts:
-            lines = code_part.count('\n') + 1
+        for code_part in self._split_parts(code):
             if is_first or line_offset >= p.module.end_pos[0]:
-                indent = len(re.match(r'[ \t]*', code_part).group(0))
+                indent = len(code_part) - len(code_part.lstrip('\t '))
                 if is_first and self.current_node is not None:
                     nodes = [self.current_node]
                 else:
                     nodes = []
                 if self.current_node is not None:
                     self.current_node = \
                         self.current_node.parent_until_indent(indent)
                     nodes += self.current_node.old_children
@@ -347,7 +336,7 @@ class FastParser(use_metaclass(CachedFastParser)):
             #else:
                 #print '#'*45, line_offset, p.module.end_pos, 'theheck\n', repr(code_part)

-            line_offset += lines
+            line_offset += code_part.count('\n') + 1
             start += len(code_part) + 1  # +1 for newline

         if self.parsers:
@@ -358,29 +347,26 @@ class FastParser(use_metaclass(CachedFastParser)):
             self.module.end_pos = self.parsers[-1].module.end_pos

         # print(self.parsers[0].module.get_code())
-        del code

     def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):
         h = hash(code)
-        hashes = [n.hash for n in nodes]
-        node = None
-        try:
-            index = hashes.index(h)
-            if nodes[index].code != code:
-                raise ValueError()
-        except ValueError:
+        for index, node in enumerate(nodes):
+            if node.hash != h or node.code != code:
+                continue
+
+            if node != self.current_node:
+                offset = int(nodes[0] == self.current_node)
+                self.current_node.old_children.pop(index - offset)
+            p = node.parser
+            m = p.module
+            m.line_offset += line_offset + 1 - m.start_pos[0]
+            break
+        else:
             tokenizer = FastTokenizer(parser_code, line_offset)
             p = Parser(parser_code, self.module_path, tokenizer=tokenizer,
                        top_module=self.module, no_docstr=no_docstr)
             p.module.parent = self.module
-        else:
-            if nodes[index] != self.current_node:
-                offset = int(nodes[0] == self.current_node)
-                self.current_node.old_children.pop(index - offset)
-            node = nodes.pop(index)
-            p = node.parser
-            m = p.module
-            m.line_offset += line_offset + 1 - m.start_pos[0]
+            node = None

         return p, node

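Two Python idioms carry this refactor: `_split_parts` becomes a generator (`yield gen_part()` replaces list building plus the `add_to_last` flag), and the new `_get_parser` uses `for ... else`, where the `else` branch runs only when the loop finished without hitting `break`, i.e. on a cache miss. A runnable toy version of that cache-lookup shape (names are illustrative, not jedi's):

    class Node(object):
        def __init__(self, code):
            self.code = code
            self.hash = hash(code)
            self.parser = 'parser(%s)' % code


    def find_cached(nodes, code):
        h = hash(code)
        for node in nodes:
            if node.hash != h or node.code != code:
                continue  # hash collision or different code: keep looking
            parser = node.parser  # cache hit: reuse the old parser
            break
        else:
            # Runs only when the loop never breaks, i.e. nothing matched.
            parser, node = 'parser(%s)' % code, None
        return parser, node


    nodes = [Node('a = 1'), Node('def f(): pass')]
    assert find_cached(nodes, 'a = 1')[1] is nodes[0]   # reused
    assert find_cached(nodes, 'b = 2')[1] is None       # freshly parsed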
View File

@@ -26,7 +26,7 @@ setup(name='jedi',
       keywords='python completion refactoring vim',
       long_description=readme,
       packages=['jedi', 'jedi.parser', 'jedi.evaluate', 'jedi.evaluate.compiled', 'jedi.api'],
-      package_data={'jedi': ['evlaluate/evaluate/compiled/fake/*.pym']},
+      package_data={'jedi': ['evaluate/compiled/fake/*.pym']},
       platforms=['any'],
       classifiers=[
           'Development Status :: 4 - Beta',

View File

@@ -127,6 +127,11 @@ def test_completion_docstring():
     assert c.docstring(raw=True, fast=False) == cleandoc(Script.__doc__)


+def test_completion_params():
+    c = Script('import string; string.capwords').completions()[0]
+    assert [p.name for p in c.params] == ['s', 'sep']
+
+
 def test_signature_params():
     def check(defs):
         params = defs[0].params

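The new test pins down public behaviour: a completion for a function exposes that function's parameters via `.params`. Usage looks roughly like this with jedi's 0.8-era API (the expected names come straight from the test; `string.capwords(s, sep=None)` in the stdlib):

    import jedi

    completion = jedi.Script('import string; string.capwords').completions()[0]
    print([p.name for p in completion.params])  # expected here: ['s', 'sep']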
View File

@@ -0,0 +1,12 @@
+#!/usr/bin/python
+
+import sys
+sys.path[0:0] = [
+    '/usr/lib/python3.4/site-packages',
+    '/tmp/.buildout/eggs/important_package.egg'
+]
+
+import important_package
+
+if __name__ == '__main__':
+    sys.exit(important_package.main())

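This fixture is exactly what `_get_buildout_scripts` is meant to find: a buildout-generated console script whose shebang mentions python and whose body splices egg paths into `sys.path`. The detection itself just reads the first line of every file in the project's `bin/` directory; a standalone sketch of that check (simplified, illustrative names):

    import os


    def python_scripts_in(bin_path):
        """Return files in bin_path whose shebang line mentions python."""
        scripts = []
        for filename in os.listdir(bin_path):
            filepath = os.path.join(bin_path, filename)
            try:
                with open(filepath) as f:
                    first = f.readline()
            except IOError:
                continue  # unreadable, or deleted mid-scan
            if first.startswith('#!') and 'python' in first:
                scripts.append(filepath)
        return scripts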
View File

@@ -0,0 +1,71 @@
+import os
+
+from ..helpers import cwd_at
+
+from jedi._compatibility import u
+from jedi.parser import Parser
+from jedi.evaluate.sys_path import (
+    _get_parent_dir_with_file,
+    _get_buildout_scripts,
+    _check_module
+)
+
+
+@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
+def test_parent_dir_with_file():
+    parent = _get_parent_dir_with_file(
+        os.path.abspath(os.curdir), 'buildout.cfg')
+    assert parent is not None
+    assert parent.endswith('test/test_evaluate/buildout_project')
+
+
+@cwd_at('test/test_evaluate/buildout_project/src/proj_name')
+def test_buildout_detection():
+    scripts = _get_buildout_scripts(os.path.abspath('./module_name.py'))
+    assert len(scripts) == 1
+    curdir = os.path.abspath(os.curdir)
+    appdir_path = os.path.normpath(os.path.join(curdir, '../../bin/app'))
+    assert scripts[0] == appdir_path
+
+
+def test_append_on_non_sys_path():
+    SRC = u("""
+class Dummy(object):
+    path = []
+
+d = Dummy()
+d.path.append('foo')""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert len(paths) > 0
+    assert 'foo' not in paths
+
+
+def test_path_from_invalid_sys_path_assignment():
+    SRC = u("""
+import sys
+sys.path = 'invalid'""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert len(paths) > 0
+    assert 'invalid' not in paths
+
+
+def test_path_from_sys_path_assignment():
+    SRC = u("""
+#!/usr/bin/python
+
+import sys
+
+sys.path[0:0] = [
+    '/usr/lib/python3.4/site-packages',
+    '/home/test/.buildout/eggs/important_package.egg'
+]
+
+path[0:0] = [1]
+
+import important_package
+
+if __name__ == '__main__':
+    sys.exit(important_package.main())""")
+    p = Parser(SRC)
+    paths = _check_module(p.module)
+    assert 1 not in paths
+    assert '/home/test/.buildout/eggs/important_package.egg' in paths

View File

@@ -57,7 +57,7 @@ class TestRegression(TestCase):
         self.assertRaises(jedi.NotFoundError, get_def, cls)

-    @pytest.mark.skip('Skip for now, test case is not really supported.')
+    @pytest.mark.skipif('True', reason='Skip for now, test case is not really supported.')
     @cwd_at('jedi')
     def test_add_dynamic_mods(self):
         fname = '__main__.py'

View File

@@ -2,7 +2,7 @@
 envlist = py26, py27, py32, py33, py34
 [testenv]
 deps =
-    https://bitbucket.org/hpk42/pytest/get/c4f58165e0d4.zip
+    pytest>=2.3.5
     # docopt for sith doctests
     docopt
     # coloroma for colored debug output