Merge branch 'dev' into bugfix/performances_degradation

# Conflicts:
#	test/test_regression.py
This commit is contained in:
ColinDuquesnoy
2016-07-30 16:52:17 +02:00
37 changed files with 647 additions and 252 deletions

View File

@@ -38,5 +38,7 @@ Sid Shanker (@squidarth) <sid.p.shanker@gmail.com>
Reinoud Elhorst (@reinhrst)
Guido van Rossum (@gvanrossum) <guido@python.org>
Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
Cristi Burcă (@scribu)
Note: (@user) means a github user name.

View File

@@ -6,6 +6,7 @@ import sys
import imp
import os
import re
import pkgutil
try:
import importlib
except ImportError:
@@ -18,6 +19,18 @@ is_py35 = is_py3 and sys.version_info.minor >= 5
is_py26 = not is_py3 and sys.version_info[1] < 7
class DummyFile(object):
    """File-like stand-in that serves module source through an import loader.

    Used where a real file object is expected but the module lives inside a
    loader (e.g. a zipimporter archive) rather than on the plain file system.
    """

    def __init__(self, loader, string):
        # Remember the loader and the module name so read() can fetch lazily.
        self.loader = loader
        self.string = string

    def read(self):
        # Delegate to the loader's source lookup for the stored module name.
        return self.loader.get_source(self.string)

    def close(self):
        # Drop the loader reference; mirrors releasing a real file handle.
        del self.loader
def find_module_py33(string, path=None):
loader = importlib.machinery.PathFinder.find_module(string, path)
@@ -35,30 +48,73 @@ def find_module_py33(string, path=None):
try:
is_package = loader.is_package(string)
if is_package:
if hasattr(loader, 'path'):
module_path = os.path.dirname(loader.path)
else:
# At least zipimporter does not have path attribute
module_path = os.path.dirname(loader.get_filename(string))
if hasattr(loader, 'archive'):
module_file = DummyFile(loader, string)
else:
module_file = None
else:
module_path = loader.get_filename(string)
module_file = open(module_path, 'rb')
module_file = DummyFile(loader, string)
except AttributeError:
# ExtensionLoader has no attribute get_filename; instead it has a
# path attribute that we can use to retrieve the module path
try:
module_path = loader.path
module_file = open(loader.path, 'rb')
module_file = DummyFile(loader, string)
except AttributeError:
module_path = string
module_file = None
finally:
is_package = False
if hasattr(loader, 'archive'):
module_path = loader.archive
return module_file, module_path, is_package
def find_module_pre_py33(string, path=None):
    """Locate a module like ``imp.find_module``, with a ``pkgutil`` fallback.

    Returns a ``(module_file, module_path, is_package)`` triple.  The
    fallback asks each path entry's importer directly, which also resolves
    modules living inside archives (importers exposing an ``archive``
    attribute) that plain ``imp.find_module`` cannot handle.

    Raises ImportError if the module cannot be located at all.
    """
    # Fast path: the classic imp machinery for regular file-system modules.
    try:
        mod_file, mod_path, description = imp.find_module(string, path)
        return mod_file, mod_path, description[2] is imp.PKG_DIRECTORY
    except ImportError:
        pass

    # Slow path: query each path entry's importer individually.
    search_path = sys.path if path is None else path
    for entry in search_path:
        importer = pkgutil.get_importer(entry)
        if not importer:
            continue
        try:
            loader = importer.find_module(string)
            if not loader:
                continue
            is_package = loader.is_package(string)
            is_archive = hasattr(loader, 'archive')
            try:
                mod_path = loader.get_filename(string)
            except AttributeError:
                # fallback for py26
                try:
                    mod_path = loader._get_filename(string)
                except AttributeError:
                    continue
            if is_package:
                mod_path = os.path.dirname(mod_path)
            if is_archive:
                mod_path = loader.archive
            # Plain file-system packages have no single file to hand back;
            # everything else is wrapped so callers can read the source.
            source_file = None
            if not is_package or is_archive:
                source_file = DummyFile(loader, string)
            return (source_file, mod_path, is_package)
        except ImportError:
            # This entry cannot provide the module; try the next one.
            pass
    raise ImportError("No module named {0}".format(string))
find_module = find_module_py33 if is_py33 else find_module_pre_py33

View File

@@ -80,7 +80,7 @@ class Completion:
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(code_lines, position)
self._like_name = helpers.get_on_completion_name(module, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._position = position[0], position[1] - len(self._like_name)

View File

@@ -10,6 +10,7 @@ from jedi.evaluate.helpers import call_of_leaf
from jedi import parser
from jedi.parser import tokenize, token
from jedi.cache import time_cache
from jedi import common
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
@@ -20,12 +21,18 @@ def sorted_definitions(defs):
return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0))
def get_on_completion_name(lines, position):
def get_on_completion_name(module, lines, position):
leaf = module.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
# Completions inside strings are a bit special, we need to parse the
# string. The same is true for comments and error_leafs.
line = lines[position[0] - 1]
# The first step of completions is to get the name
return re.search(
r'(?!\d)\w+$|$', line[:position[1]]
).group(0)
return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
elif leaf.type not in ('name', 'keyword'):
return ''
return leaf.value[:position[1] - leaf.start_pos[1]]
def _get_code(code_lines, start_pos, end_pos):
@@ -44,71 +51,107 @@ class OnErrorLeaf(Exception):
return self.args[0]
def _is_on_comment(leaf, position):
    """Return True if ``position`` points into a ``#`` comment.

    Comments are stored in a leaf's *prefix* (the whitespace/comment text
    preceding the leaf), so this inspects the prefix of the given leaf.
    """
    # We might be on a comment.
    if leaf.type == 'endmarker':
        try:
            dedent = leaf.get_previous_leaf()
            if dedent.type == 'dedent' and dedent.prefix:
                # TODO This is needed because the fast parser uses multiple
                # endmarker tokens within a file which is obviously ugly.
                # This is so ugly that I'm not even commenting how it exactly
                # happens, but let me tell you that I want to get rid of it.
                leaf = dedent
        except IndexError:
            # No previous leaf: endmarker is the very first leaf.
            pass
    comment_lines = common.splitlines(leaf.prefix)
    # Line distance from the queried position up to the leaf itself.
    difference = leaf.start_pos[0] - position[0]
    prefix_start_pos = leaf.get_start_pos_of_prefix()
    if difference == 0:
        # Same line as the leaf: the prefix line starts at the leaf's column.
        indent = leaf.start_pos[1]
    elif position[0] == prefix_start_pos[0]:
        # First line of the prefix; it starts at the prefix's own column.
        indent = prefix_start_pos[1]
    else:
        # A full line inside the prefix starts at column 0.
        indent = 0
    # Cut the relevant prefix line off at the cursor column and scan for '#'.
    # NOTE(review): assumes position[1] >= indent on the matched line — confirm.
    line = comment_lines[-difference - 1][:position[1] - indent]
    return '#' in line
def _get_code_for_stack(code_lines, module, position):
    """Return the code of the statement that ``position`` belongs to.

    The returned snippet starts at the statement's prefix start and ends at
    ``position``; an empty unicode string means "no relevant context".

    Raises OnErrorLeaf when the position sits on an unparsable leaf
    (error leaf or string), where completion cannot work.
    """
    leaf = module.get_leaf_for_position(position, include_prefixes=True)
    # It might happen that we're on whitespace or on a comment. This means
    # that we would not get the right leaf.
    if leaf.start_pos >= position:
        if _is_on_comment(leaf, position):
            return u('')

        # If we're not on a comment simply get the previous leaf and proceed.
        try:
            leaf = leaf.get_previous_leaf()
        except IndexError:
            return u('')  # At the beginning of the file.

    is_after_newline = leaf.type == 'newline'
    # Skip newline leaves to reach the last leaf with actual content.
    while leaf.type == 'newline':
        try:
            leaf = leaf.get_previous_leaf()
        except IndexError:
            return u('')

    if leaf.type in ('indent', 'dedent'):
        # Pure indentation carries no statement context.
        return u('')
    elif leaf.type == 'error_leaf' or leaf.type == 'string':
        # Error leafs cannot be parsed, completion in strings is also
        # impossible.
        raise OnErrorLeaf(leaf)
    else:
        if leaf == ';':
            # A lone ';' has no definition of its own; use its parent stmt.
            user_stmt = leaf.parent
        else:
            user_stmt = leaf.get_definition()
        if user_stmt.parent.type == 'simple_stmt':
            user_stmt = user_stmt.parent

    if is_after_newline:
        if user_stmt.start_pos[1] > position[1]:
            # This means that it's actually a dedent and that means that we
            # start without context (part of a suite).
            return u('')

    # This is basically getting the relevant lines.
    return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, module, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
user_stmt = module.get_statement_for_position(pos)
if user_stmt is not None and user_stmt.type in ('indent', 'dedent'):
code = u('')
else:
if user_stmt is None:
user_stmt = module.get_leaf_for_position(pos, include_prefixes=True)
if pos <= user_stmt.start_pos:
try:
leaf = user_stmt.get_previous_leaf()
except IndexError:
pass
else:
user_stmt = module.get_statement_for_position(leaf.start_pos)
if user_stmt.type == 'error_leaf' or user_stmt.type == 'string':
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(user_stmt)
start_pos = user_stmt.start_pos
if user_stmt.first_leaf() == '@':
# TODO this once again proves that just using user_stmt.get_code
# would probably be nicer than _get_code.
# Remove the indent to have a statement that is aligned (properties
# on the same line as function)
start_pos = start_pos[0], 0
code = _get_code(code_lines, start_pos, pos)
if code == ';':
# ; cannot be parsed.
code = u('')
# Remove whitespace at the end. Necessary, because the tokenizer will parse
# an error token (there's no new line at the end in our case). This doesn't
# alter any truth about the valid tokens at that position.
code = code.rstrip('\t ')
# Remove as many indents from **all** code lines as possible.
code = dedent(code)
class EndMarkerReached(Exception):
    # Control-flow signal: raised while tokenizing once the artificial
    # safeword end marker is reached, to stop parsing at that point.
    pass
def tokenize_without_endmarker(code):
tokens = tokenize.source_tokens(code, use_exact_op_types=True)
for token_ in tokens:
if token_[0] == token.ENDMARKER:
if token_.string == safeword:
raise EndMarkerReached()
elif token_[0] == token.DEDENT:
# Ignore those. Error statements should not contain them, if
# they do it's for cases where an indentation happens and
# before the endmarker we still see them.
pass
else:
yield token_
p = parser.Parser(grammar, code, start_parsing=False)
code = _get_code_for_stack(code_lines, module, pos)
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
# Remove as many indents from **all** code lines as possible.
code = code + safeword
p = parser.ParserWithRecovery(grammar, code, start_parsing=False)
try:
p.parse(tokenizer=tokenize_without_endmarker(code))
except EndMarkerReached:
return Stack(p.stack)
raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
class Stack(list):

View File

@@ -128,8 +128,8 @@ def source_to_unicode(source, encoding=None):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0)
possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
@@ -141,8 +141,11 @@ def source_to_unicode(source, encoding=None):
# only cast str/bytes
return source
encoding = detect_encoding()
if not isinstance(encoding, unicode):
encoding = unicode(encoding, 'utf-8', 'replace')
# cast to unicode by default
return unicode(source, detect_encoding(), 'replace')
return unicode(source, encoding, 'replace')
def splitlines(string):
@@ -158,3 +161,9 @@ def splitlines(string):
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(chain.from_iterable(iterable))
def to_list(func):
    """Decorator that materializes ``func``'s iterable return value as a list.

    Handy for generator functions whose callers want a concrete list.
    """
    # Local import keeps this helper self-contained.
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper

View File

@@ -86,14 +86,9 @@ def increase_indent(func):
def dbg(message, *args, **kwargs):
""" Looks at the stack, to see if a debug message should be printed. """
if kwargs:
# Python 2 compatibility, because it doesn't understand default args
# after *args.
color = kwargs.get('color')
if color is None:
raise TypeError("debug.dbg doesn't support more named arguments than color")
else:
color = 'GREEN'
color = kwargs.pop('color', 'GREEN')
assert color
if debug_function and enable_notice:
frm = inspect.stack()[1]
@@ -104,10 +99,15 @@ def dbg(message, *args, **kwargs):
debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
def warning(message, *args):
def warning(message, *args, **kwargs):
format = kwargs.pop('format', True)
assert not kwargs
if debug_function and enable_warning:
i = ' ' * _debug_indent
debug_function('RED', i + 'warning: ' + message % tuple(u(repr(a)) for a in args))
if format:
message = message % tuple(u(repr(a)) for a in args)
debug_function('RED', i + 'warning: ' + message)
def speed(name):

View File

@@ -275,7 +275,7 @@ class Evaluator(object):
return self._eval_element_not_cached(element)
return self._eval_element_cached(element)
@memoize_default(evaluator_is_first_arg=True)
@memoize_default(default=set(), evaluator_is_first_arg=True)
def _eval_element_cached(self, element):
return self._eval_element_not_cached(element)

View File

@@ -91,7 +91,7 @@ def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
module_path = jedi_obj.get_parent_until().path
instance = typ(name, module_path, jedi_obj.start_pos, message)
debug.warning(str(instance))
debug.warning(str(instance), format=False)
evaluator.analysis.append(instance)

View File

@@ -156,7 +156,7 @@ def get_faked(module, obj, name=None):
doc = '"""%s"""' % obj.__doc__ # TODO need escapes.
suite = result.children[-1]
string = pt.String(pt.zero_position_modifier, doc, (0, 0), '')
new_line = pt.Whitespace('\n', (0, 0), '')
new_line = pt.Newline('\n', (0, 0), '')
docstr_node = pt.Node('simple_stmt', [string, new_line])
suite.children.insert(2, docstr_node)
return result

View File

@@ -21,6 +21,7 @@ from textwrap import dedent
from jedi.evaluate.cache import memoize_default
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser.tree import Class
from jedi.common import indent_block
from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated
@@ -174,13 +175,21 @@ def _execute_array_values(evaluator, array):
@memoize_default(None, evaluator_is_first_arg=True)
def follow_param(evaluator, param):
func = param.parent_function
def eval_docstring(docstring):
return set(
[p for param_str in _search_param_in_docstr(func.raw_doc,
str(param.name))
for p in _evaluate_for_statement_string(evaluator, param_str,
param.get_parent_until())])
[p for param_str in _search_param_in_docstr(docstring, str(param.name))
for p in _evaluate_for_statement_string(evaluator, param_str, module)]
)
func = param.parent_function
module = param.get_parent_until()
types = eval_docstring(func.raw_doc)
if func.name.value == '__init__':
cls = func.get_parent_until(Class)
if cls.type == 'classdef':
types |= eval_docstring(cls.raw_doc)
return types
@memoize_default(None, evaluator_is_first_arg=True)

View File

@@ -26,8 +26,8 @@ def deep_ast_copy(obj, parent=None, new_elements=None):
new_children = []
for child in obj.children:
typ = child.type
if typ in ('whitespace', 'operator', 'keyword', 'number', 'string',
'indent', 'dedent', 'error_leaf'):
if typ in ('newline', 'operator', 'keyword', 'number', 'string',
'indent', 'dedent', 'endmarker', 'error_leaf'):
# At the moment we're not actually copying those primitive
# elements, because there's really no need to. The parents are
# obviously wrong, but that's not an issue.

View File

@@ -68,11 +68,6 @@ class ImportWrapper(tree.Base):
@memoize_default()
def follow(self, is_goto=False):
if self._evaluator.recursion_detector.push_stmt(self._import):
# check recursion
return set()
try:
module = self._evaluator.wrap(self._import.get_parent_until())
import_path = self._import.path_for_name(self._name)
from_import_name = None
@@ -116,8 +111,6 @@ class ImportWrapper(tree.Base):
types = set(s.name for s in types)
debug.dbg('after import: %s', types)
finally:
self._evaluator.recursion_detector.pop_stmt()
return types
@@ -285,20 +278,17 @@ class Importer(object):
# We can take the first element, because only the os special
# case yields multiple modules, which is not important for
# further imports.
base = list(bases)[0]
parent_module = list(bases)[0]
# This is a huge exception, we follow a nested import
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
if [str(i) for i in import_path] == ['os', 'path']:
return self._evaluator.find_types(base, 'path')
return self._evaluator.find_types(parent_module, 'path')
try:
# It's possible that by giving it always the sys path (and not
# the __path__ attribute of the parent, we get wrong results
# and nested namespace packages don't work. But I'm not sure.
paths = base.py__path__(sys_path)
paths = parent_module.py__path__()
except AttributeError:
# The module is not a package.
_add_error(self._evaluator, import_path[-1])
@@ -318,6 +308,7 @@ class Importer(object):
_add_error(self._evaluator, import_path[-1])
return set()
else:
parent_module = None
try:
debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
# Override the sys.path. It works only good that way.
@@ -337,15 +328,18 @@ class Importer(object):
if is_pkg:
# In this case, we don't have a file yet. Search for the
# __init__ file.
if module_path.endswith(('.zip', '.egg')):
source = module_file.loader.get_source(module_name)
else:
module_path = get_init_path(module_path)
elif module_file:
source = module_file.read()
module_file.close()
if module_file is None and not module_path.endswith('.py'):
if module_file is None and not module_path.endswith(('.py', '.zip', '.egg')):
module = compiled.load_module(self._evaluator, module_path)
else:
module = _load_module(self._evaluator, module_path, source, sys_path)
module = _load_module(self._evaluator, module_path, source, sys_path, parent_module)
if module is None:
# The file might raise an ImportError e.g. and therefore not be
@@ -408,7 +402,7 @@ class Importer(object):
# namespace packages
if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'):
paths = scope.py__path__(self.sys_path_with_modifications())
paths = scope.py__path__()
names += self._get_module_names(paths)
if only_modules:
@@ -441,10 +435,10 @@ class Importer(object):
return names
def _load_module(evaluator, path=None, source=None, sys_path=None):
def _load_module(evaluator, path=None, source=None, sys_path=None, parent_module=None):
def load(source):
dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
if path is not None and path.endswith('.py') \
if path is not None and path.endswith(('.py', '.zip', '.egg')) \
and dotted_path not in settings.auto_import_modules:
if source is None:
with open(path, 'rb') as f:
@@ -454,7 +448,8 @@ def _load_module(evaluator, path=None, source=None, sys_path=None):
p = path
p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p)
save_parser(path, p)
return p.module
from jedi.evaluate.representation import ModuleWrapper
return ModuleWrapper(evaluator, p.module, parent_module)
if sys_path is None:
sys_path = evaluator.sys_path

View File

@@ -30,6 +30,7 @@ from jedi.evaluate import helpers
from jedi.evaluate.cache import CachedMetaClass, memoize_default
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi import common
class IterableWrapper(tree.Base):
@@ -180,23 +181,29 @@ class Comprehension(IterableWrapper):
"""
comp_for = self._get_comp_for()
# For nested comprehensions we need to search the last one.
from jedi.evaluate.representation import InstanceElement
node = self._get_comprehension().children[index]
if isinstance(node, InstanceElement):
# This seems to be a strange case that I haven't found a way to
# write tests against. However since it's my new goal to get rid of
# InstanceElement anyway, I don't care.
node = node.var
last_comp = list(comp_for.get_comp_fors())[-1]
return helpers.deep_ast_copy(self._get_comprehension().children[index], parent=last_comp)
return helpers.deep_ast_copy(node, parent=last_comp)
@memoize_default()
def _iterate(self):
def nested(comp_fors):
def _nested(self, comp_fors):
evaluator = self._evaluator
comp_for = comp_fors[0]
input_node = comp_for.children[3]
input_types = evaluator.eval_element(input_node)
iterated = py__iter__(evaluator, input_types, input_node)
exprlist = comp_for.children[1]
for types in iterated:
for i, types in enumerate(iterated):
evaluator.predefined_if_name_dict_dict[comp_for] = \
unpack_tuple_to_dict(evaluator, types, exprlist)
try:
for result in nested(comp_fors[1:]):
for result in self._nested(comp_fors[1:]):
yield result
except IndexError:
iterated = evaluator.eval_element(self._eval_node())
@@ -207,9 +214,11 @@ class Comprehension(IterableWrapper):
finally:
del evaluator.predefined_if_name_dict_dict[comp_for]
evaluator = self._evaluator
comp_fors = list(self._get_comp_for().get_comp_fors())
for result in nested(comp_fors):
@memoize_default(default=[])
@common.to_list
def _iterate(self):
comp_fors = tuple(self._get_comp_for().get_comp_fors())
for result in self._nested(comp_fors):
yield result
def py__iter__(self):
@@ -252,7 +261,7 @@ class ArrayMixin(object):
@register_builtin_method('values', type='dict')
def _imitate_values(self):
items = self.dict_values()
return create_evaluated_sequence_set(self._evaluator, items, type='list')
return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list')
#return set([FakeSequence(self._evaluator, [AlreadyEvaluated(items)], 'tuple')])
@register_builtin_method('items', type='dict')
@@ -260,7 +269,7 @@ class ArrayMixin(object):
items = [set([FakeSequence(self._evaluator, (k, v), 'tuple')])
for k, v in self._items()]
return create_evaluated_sequence_set(self._evaluator, *items, type='list')
return create_evaluated_sequence_set(self._evaluator, *items, sequence_type='list')
class ListComprehension(Comprehension, ArrayMixin):
@@ -268,7 +277,14 @@ class ListComprehension(Comprehension, ArrayMixin):
def py__getitem__(self, index):
all_types = list(self.py__iter__())
return all_types[index]
result = all_types[index]
if isinstance(index, slice):
return create_evaluated_sequence_set(
self._evaluator,
unite(result),
sequence_type='list'
)
return result
class SetComprehension(Comprehension, ArrayMixin):
@@ -303,9 +319,7 @@ class DictComprehension(Comprehension, ArrayMixin):
(AlreadyEvaluated(keys), AlreadyEvaluated(values)), 'tuple')
for keys, values in self._iterate())
return create_evaluated_sequence_set(self._evaluator, items, type='list')
return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list')
class GeneratorComprehension(Comprehension, GeneratorMixin):
@@ -446,7 +460,9 @@ def create_evaluated_sequence_set(evaluator, *types_order, **kwargs):
``sequence_type`` would naturally be a keyword-only argument, but that syntax
does not exist in Python 2. For backwards-compatibility reasons, we're now using kwargs.
"""
sequence_type = kwargs.get('sequence_type')
sequence_type = kwargs.pop('sequence_type')
assert not kwargs
sets = tuple(AlreadyEvaluated(types) for types in types_order)
return set([FakeSequence(evaluator, sets, sequence_type)])
@@ -676,7 +692,7 @@ def _check_array_additions(evaluator, compare_array, module, is_list):
# Arguments([AlreadyEvaluated([_ArrayInstance])]) inside
# Yeah... I know... It's complicated ;-)
node = list(element.var_args.argument_node[0])[0].var_args.trailer
if isinstance(node, er.InstanceElement):
if isinstance(node, er.InstanceElement) or node is None:
return node
return node.get_parent_until(er.FunctionExecution)
@@ -729,10 +745,11 @@ def _check_array_additions(evaluator, compare_array, module, is_list):
# Check for recursion. Possible by using 'extend' in
# combination with function calls.
continue
try:
if compare_array in evaluator.eval_element(power):
# The arrays match. Now add the results
added_types |= check_additions(execution_trailer.children[1], add_name)
finally:
evaluator.recursion_detector.pop_stmt()
# reset settings
settings.dynamic_params_for_other_modules = temp_param_add
@@ -777,6 +794,8 @@ class _ArrayInstance(IterableWrapper):
yield types
module = self.var_args.get_parent_until()
if module is None:
return
is_list = str(self.instance.name) == 'list'
additions = _check_array_additions(self._evaluator, self.instance, module, is_list)
if additions:

View File

@@ -15,7 +15,6 @@ from jedi.evaluate import iterable
def recursion_decorator(func):
def run(evaluator, stmt, *args, **kwargs):
rec_detect = evaluator.recursion_detector
# print stmt, len(self.node_statements())
if rec_detect.push_stmt(stmt):
return set()
else:

View File

@@ -472,7 +472,10 @@ class Class(use_metaclass(CachedMetaClass, Wrapper)):
@property
def params(self):
try:
return self.get_subscope_by_name('__init__').params
except KeyError:
return [] # object.__init__
def names_dicts(self, search_global, is_instance=False):
if search_global:
@@ -803,9 +806,10 @@ class GlobalName(helpers.FakeName):
class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)):
def __init__(self, evaluator, module):
def __init__(self, evaluator, module, parent_module=None):
self._evaluator = evaluator
self.base = self._module = module
self._parent_module = parent_module
def names_dicts(self, search_global):
yield self.base.names_dict
@@ -851,6 +855,10 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)):
return helpers.FakeName(unicode(self.base.name), self, (1, 0))
def _get_init_directory(self):
"""
:return: The path to the directory of a package. None in case it's not
a package.
"""
for suffix, _, _ in imp.get_suffixes():
ending = '__init__' + suffix
py__file__ = self.py__file__()
@@ -881,22 +889,13 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)):
else:
return self.py__name__()
@property
def py__path__(self):
"""
Not seen here, since it's a property. The callback actually uses a
variable, so use it like::
foo.py__path__(sys_path)
In case of a package, this returns Python's __path__ attribute, which
is a list of paths (strings).
Raises an AttributeError if the module is not a package.
"""
def return_value(search_path):
def _py__path__(self):
if self._parent_module is None:
search_path = self._evaluator.sys_path
else:
search_path = self._parent_module.py__path__()
init_path = self.py__file__()
if os.path.basename(init_path) == '__init__.py':
with open(init_path, 'rb') as f:
content = common.source_to_unicode(f.read())
# these are strings that need to be used for namespace packages,
@@ -912,14 +911,26 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)):
paths.add(other)
return list(paths)
# Default to this.
return [path]
return [self._get_init_directory()]
@property
def py__path__(self):
"""
Not seen here, since it's a property. The callback actually uses a
variable, so use it like::
foo.py__path__(sys_path)
In case of a package, this returns Python's __path__ attribute, which
is a list of paths (strings).
Raises an AttributeError if the module is not a package.
"""
path = self._get_init_directory()
if path is None:
raise AttributeError('Only packages have __path__ attributes.')
else:
return return_value
return self._py__path__
@memoize_default()
def _sub_modules_dict(self):

View File

@@ -214,12 +214,14 @@ class Parser(object):
return pt.String(self.position_modifier, value, start_pos, prefix)
elif type == NUMBER:
return pt.Number(self.position_modifier, value, start_pos, prefix)
elif type in (NEWLINE, ENDMARKER):
return pt.Whitespace(self.position_modifier, value, start_pos, prefix)
elif type == NEWLINE:
return pt.Newline(self.position_modifier, value, start_pos, prefix)
elif type == INDENT:
return pt.Indent(self.position_modifier, value, start_pos, prefix)
elif type == DEDENT:
return pt.Dedent(self.position_modifier, value, start_pos, prefix)
elif type == ENDMARKER:
return pt.EndMarker(self.position_modifier, value, start_pos, prefix)
else:
return pt.Operator(self.position_modifier, value, start_pos, prefix)
@@ -287,7 +289,8 @@ class ParserWithRecovery(Parser):
:param module_path: The path of the module in the file system, may be None.
:type module_path: str
"""
def __init__(self, grammar, source, module_path=None, tokenizer=None):
def __init__(self, grammar, source, module_path=None, tokenizer=None,
start_parsing=True):
self.syntax_errors = []
self._omit_dedent_list = []
@@ -302,8 +305,12 @@ class ParserWithRecovery(Parser):
# if self.options["print_function"]:
# python_grammar = pygram.python_grammar_no_print_statement
# else:
super(ParserWithRecovery, self).__init__(grammar, source, tokenizer=tokenizer)
super(ParserWithRecovery, self).__init__(
grammar, source,
tokenizer=tokenizer,
start_parsing=start_parsing
)
if start_parsing:
self.module = self._parsed
self.module.used_names = self._used_names
self.module.path = module_path

View File

@@ -120,6 +120,10 @@ class ParserNode(object):
self.source = source
self.hash = hash(source)
self.parser = parser
if source:
self._end_pos = parser.module.end_pos
else:
self._end_pos = 1, 0
try:
# With fast_parser we have either 1 subscope or only statements.
@@ -162,6 +166,10 @@ class ParserNode(object):
# There's no module yet.
return '<%s: empty>' % type(self).__name__
@property
def end_pos(self):
return self._end_pos[0] + self.parser.position_modifier.line, self._end_pos[1]
def reset_node(self):
"""
Removes changes that were applied in this class.
@@ -188,6 +196,10 @@ class ParserNode(object):
# Need to insert the own node as well.
dcts.insert(0, self._content_scope.names_dict)
self._content_scope.names_dict = MergedNamesDict(dcts)
endmarker = self.parser.get_parsed_node().children[-1]
assert endmarker.type == 'endmarker'
last_parser = self._node_children[-1].parser
endmarker.start_pos = last_parser.get_parsed_node().end_pos
@property
def _indent(self):
@@ -414,7 +426,7 @@ class FastParser(use_metaclass(CachedFastParser)):
# called - just ignore it.
src = ''.join(self._lines[code_part_end_line - 1:])
self._parse_part(code_part, src, code_part_end_line, nodes)
last_end_line = self.current_node.parser.module.end_pos[0]
last_end_line = self.current_node.end_pos[0]
debug.dbg("While parsing %s, starting with line %s wasn't included in split.",
self.module_path, code_part_end_line)
#assert code_part_end_line > last_end_line
@@ -426,7 +438,7 @@ class FastParser(use_metaclass(CachedFastParser)):
code_part_end_line = next_code_part_end_line
start += len(code_part)
last_end_line = self.current_node.parser.module.end_pos[0]
last_end_line = self.current_node.end_pos[0]
if added_newline:
self.current_node.remove_last_newline()

View File

@@ -147,10 +147,12 @@ class Base(object):
return scope
def get_definition(self):
if self.type in ('newline', 'dedent', 'indent', 'endmarker'):
raise ValueError('Cannot get the indentation of whitespace or indentation.')
scope = self
while scope.parent is not None:
parent = scope.parent
if scope.isinstance(Node, Name) and parent.type != 'simple_stmt':
if scope.isinstance(Node, Leaf) and parent.type != 'simple_stmt':
if scope.type == 'testlist_comp':
try:
if isinstance(scope.children[1], CompFor):
@@ -292,7 +294,11 @@ class Leaf(Base):
def get_start_pos_of_prefix(self):
try:
return self.get_previous_leaf().end_pos
previous_leaf = self
while True:
previous_leaf = previous_leaf.get_previous_leaf()
if previous_leaf.type not in ('indent', 'dedent'):
return previous_leaf.end_pos
except IndexError:
return 1, 0 # It's the first leaf.
@@ -348,10 +354,15 @@ class LeafWithNewLines(Leaf):
return "<%s: %r>" % (type(self).__name__, self.value)
class Whitespace(LeafWithNewLines):
class EndMarker(Leaf):
__slots__ = ()
type = 'endmarker'
class Newline(LeafWithNewLines):
"""Contains NEWLINE and ENDMARKER tokens."""
__slots__ = ()
type = 'whitespace'
type = 'newline'
@utf8_repr
def __repr__(self):
@@ -421,7 +432,7 @@ class Indent(Leaf):
class Dedent(Leaf):
type = 'indent'
type = 'dedent'
__slots__ = ()
@@ -539,6 +550,10 @@ class BaseNode(Base):
try:
return c.get_leaf_for_position(position, include_prefixes)
except AttributeError:
while c.type in ('indent', 'dedent'):
# We'd rather not have indents and dedents as a leaf,
# because they don't contain indentation information.
c = c.get_next_leaf()
return c
return None

View File

@@ -74,7 +74,7 @@ def setup_readline(namespace_module=__main__):
lines = common.splitlines(text)
position = (len(lines), len(lines[-1]))
name = get_on_completion_name(lines, position)
name = get_on_completion_name(interpreter._get_module(), lines, position)
before = text[:len(text) - len(name)]
completions = interpreter.completions()
finally:

View File

@@ -311,6 +311,17 @@ for i in 0, 2:
#? int() str()
GetItemWithList()[i]
# With super
class SuperYeah(list):
def __getitem__(self, index):
return super()[index]
#?
SuperYeah([1])[0]
#?
SuperYeah()[0]
# -----------------
# conversions
# -----------------
@@ -368,20 +379,6 @@ for i in set(a for a in [1]):
i
# -----------------
# Recursions
# -----------------
def to_list(iterable):
return list(set(iterable))
def recursion1(foo):
return to_list(to_list(foo)) + recursion1(foo)
#? int()
recursion1([1,2])[0]
# -----------------
# Merged Arrays
# -----------------

View File

@@ -190,6 +190,16 @@ def a():
#?
# str literals in comment """ upper
def completion_in_comment():
#? ['Exception']
# might fail because the comment is not a leaf: Exception
pass
some_word
#? ['Exception']
# Very simple comment completion: Exception
# Commment after it
# -----------------
# magic methods
# -----------------

View File

@@ -294,20 +294,6 @@ class A():
#? list()
A().b()
# -----------------
# recursions
# -----------------
def Recursion():
def recurse(self):
self.a = self.a
self.b = self.b.recurse()
#?
Recursion().a
#?
Recursion().b
# -----------------
# ducktyping
# -----------------

View File

@@ -0,0 +1,26 @@
"""
Special cases of completions (typically special positions that caused issues
with context parsing).
"""
def pass_decorator(func):
return func
def x():
return (
1,
#? ["tuple"]
tuple
)
# Comment just somewhere
class MyClass:
@pass_decorator
def x(foo,
#? 5 ["tuple"]
tuple,
):
return 1

View File

@@ -173,4 +173,28 @@ def x():
[a for a in h if hio]
if hio: pass
# -----------------
# slices
# -----------------
#? list()
foo = [x for x in [1, '']][:1]
#? int()
foo[0]
# -----------------
# In class
# -----------------
class X():
def __init__(self, bar):
self.bar = bar
def foo(self):
x = [a for a in self.bar][0]
#? int()
x
return x
#? int()
X([1]).foo()

View File

@@ -177,3 +177,44 @@ d = ''
""" bsdf """
#? str()
d.upper()
# -----------------
# class docstrings
# -----------------
class InInit():
def __init__(self, foo):
"""
:type foo: str
"""
#? str()
foo
class InClass():
"""
:type foo: str
"""
def __init__(self, foo):
#? str()
foo
class InBoth():
"""
:type foo: str
"""
def __init__(self, foo):
"""
:type foo: int
"""
#? str() int()
foo
def __init__(foo):
"""
:type foo: str
"""
#? str()
foo

View File

@@ -102,6 +102,13 @@ def f(t=None):
#! 9 ['t=None']
t = t or 1
class X():
pass
#! 3 []
X(foo=x)
# -----------------
# imports
# -----------------

View File

@@ -0,0 +1,51 @@
"""
Code that might cause recursion issues (or has caused in the past).
"""
def Recursion():
def recurse(self):
self.a = self.a
self.b = self.b.recurse()
#?
Recursion().a
#?
Recursion().b
class X():
def __init__(self):
self.recursive = [1, 3]
def annoying(self):
self.recursive = [self.recursive[0]]
def recurse(self):
self.recursive = [self.recursive[1]]
#? int()
X().recursive[0]
def to_list(iterable):
return list(set(iterable))
def recursion1(foo):
return to_list(to_list(foo)) + recursion1(foo)
#? int()
recursion1([1,2])[0]
class FooListComp():
def __init__(self):
self.recursive = [1]
def annoying(self):
self.recursive = [x for x in self.recursive]
#? int()
FooListComp().recursive[0]

View File

@@ -83,7 +83,7 @@ def test_completion_on_hex_literals():
_check_number('0x1.', 'int') # hexdecimal
# Completing binary literals doesn't work if they are not actually binary
# (invalid statements).
assert api.Script('0b2.').completions() == []
assert api.Script('0b2.b').completions() == []
_check_number('0b1.', 'int') # binary
_check_number('0x2e.', 'int')
@@ -98,8 +98,10 @@ def test_completion_on_complex_literals():
_check_number('1j.', 'complex')
_check_number('44.j.', 'complex')
_check_number('4.0j.', 'complex')
# No dot no completion
assert api.Script('4j').completions() == []
# No dot, no completion - or so I thought; but 4j is actually a literal after
# which a keyword like `or` is allowed. Good times, haha!
assert (set([c.name for c in api.Script('4j').completions()]) ==
set(['if', 'and', 'in', 'is', 'not', 'or']))
def test_goto_assignments_on_non_name():

View File

@@ -0,0 +1,9 @@
from textwrap import dedent
from jedi import Script
def test_in_whitespace():
    """Completions requested inside a function's leading whitespace work."""
    source = dedent('''
    def x():
        pass''')
    found = Script(source, column=2).completions()
    # A position in the indentation should still offer the full global scope.
    assert len(found) > 20

View File

@@ -0,0 +1,4 @@
# Declare this directory as a setuptools-style namespace package so that
# several distributions may contribute sub-packages under the same name.
# Silently fall back when setuptools/pkg_resources is not installed.
try:
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    pass

View File

@@ -0,0 +1 @@
# Fixture constant; resolved by test_nested_namespace_package via
# `from nested_namespaces.namespace.pkg import CONST`.
CONST = 1

View File

@@ -4,7 +4,7 @@ import sys
import pytest
import jedi
from jedi._compatibility import find_module_py33
from jedi._compatibility import find_module_py33, find_module
from ..helpers import cwd_at
@@ -14,6 +14,44 @@ def test_find_module_py33():
assert find_module_py33('_io') == (None, '_io', False)
def test_find_module_package():
    """A stdlib package yields no file object and is flagged as a package."""
    module_file, module_path, is_package = find_module('json')
    assert module_file is None
    assert module_path.endswith('json')
    assert is_package is True
def test_find_module_not_package():
    """A plain stdlib module yields a readable file and is_package=False."""
    module_file, module_path, is_package = find_module('io')
    assert module_file is not None
    assert module_path.endswith('io.py')
    assert is_package is False
def test_find_module_package_zipped():
    """A package stored inside a zip archive is found and detected as a package."""
    archive = os.path.join(os.path.dirname(__file__), 'zipped_imports/pkg.zip')
    if 'zipped_imports/pkg.zip' not in sys.path:
        sys.path.append(archive)
    module_file, module_path, is_package = find_module('pkg')
    assert module_file is not None
    assert module_path.endswith('pkg.zip')
    assert is_package is True
    # Completion must see the module inside the archive as well.
    completions = jedi.Script('import pkg; pkg.mod', 1, 19).completions()
    assert len(completions) == 1
@pytest.mark.skipif('sys.version_info < (2,7)')
def test_find_module_not_package_zipped():
    """A single zipped module (not a package) is found with is_package=False."""
    archive = os.path.join(os.path.dirname(__file__),
                           'zipped_imports/not_pkg.zip')
    if 'zipped_imports/not_pkg.zip' not in sys.path:
        sys.path.append(archive)
    module_file, module_path, is_package = find_module('not_pkg')
    assert module_file is not None
    assert module_path.endswith('not_pkg.zip')
    assert is_package is False
    # Attribute completion must work on the zipped module too.
    completions = jedi.Script('import not_pkg; not_pkg.val', 1, 27).completions()
    assert len(completions) == 1
@cwd_at('test/test_evaluate/not_in_sys_path/pkg')
def test_import_not_in_sys_path():
"""

View File

@@ -51,3 +51,15 @@ def test_namespace_package():
completion = c
solution = "statement: foo = '%s'" % solution
assert completion.description == solution
def test_nested_namespace_package():
    """goto_definitions resolves names exported through nested namespace packages."""
    code = 'from nested_namespaces.namespace.pkg import CONST'
    script = jedi.Script(source=code, line=1, column=45,
                         sys_path=[dirname(__file__)])
    definitions = script.goto_definitions()
    assert len(definitions) == 1

Binary file not shown.

Binary file not shown.

View File

@@ -14,6 +14,7 @@ import jedi
from jedi._compatibility import u
from jedi import Script
from jedi import api
from jedi import common
from jedi.evaluate import imports
from jedi.parser import ParserWithRecovery, load_grammar
@@ -179,6 +180,15 @@ class TestRegression(TestCase):
else:
assert n == limit
def test_source_to_unicode_unicode_text(self):
    """source_to_unicode honors the vim fileencoding cookie when decoding bytes."""
    encoded = (
        b"# vim: fileencoding=utf-8\n"
        b"# \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\n"
    )
    decoded = common.source_to_unicode(encoded)
    assert decoded == encoded.decode('utf-8')
def test_loading_unicode_files_with_bad_global_charset(monkeypatch, tmpdir):
dirname = str(tmpdir.mkdir('jedi-test'))