diff --git a/AUTHORS.txt b/AUTHORS.txt index 9e7f9800..ad4a007f 100644 --- a/AUTHORS.txt +++ b/AUTHORS.txt @@ -38,5 +38,7 @@ Sid Shanker (@squidarth) Reinoud Elhorst (@reinhrst) Guido van Rossum (@gvanrossum) Dmytro Sadovnychyi (@sadovnychyi) +Cristi Burcă (@scribu) + Note: (@user) means a github user name. diff --git a/jedi/_compatibility.py b/jedi/_compatibility.py index 0fde756c..3c14b6f2 100644 --- a/jedi/_compatibility.py +++ b/jedi/_compatibility.py @@ -6,6 +6,7 @@ import sys import imp import os import re +import pkgutil try: import importlib except ImportError: @@ -18,6 +19,18 @@ is_py35 = is_py3 and sys.version_info.minor >= 5 is_py26 = not is_py3 and sys.version_info[1] < 7 +class DummyFile(object): + def __init__(self, loader, string): + self.loader = loader + self.string = string + + def read(self): + return self.loader.get_source(self.string) + + def close(self): + del self.loader + + def find_module_py33(string, path=None): loader = importlib.machinery.PathFinder.find_module(string, path) @@ -35,30 +48,73 @@ def find_module_py33(string, path=None): try: is_package = loader.is_package(string) if is_package: - module_path = os.path.dirname(loader.path) - module_file = None + if hasattr(loader, 'path'): + module_path = os.path.dirname(loader.path) + else: + # At least zipimporter does not have path attribute + module_path = os.path.dirname(loader.get_filename(string)) + if hasattr(loader, 'archive'): + module_file = DummyFile(loader, string) + else: + module_file = None else: module_path = loader.get_filename(string) - module_file = open(module_path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: # ExtensionLoader has not attribute get_filename, instead it has a # path attribute that we can use to retrieve the module path try: module_path = loader.path - module_file = open(loader.path, 'rb') + module_file = DummyFile(loader, string) except AttributeError: module_path = string module_file = None finally: is_package = False + if hasattr(loader, 'archive'): + module_path = loader.archive + return module_file, module_path, is_package def find_module_pre_py33(string, path=None): - module_file, module_path, description = imp.find_module(string, path) - module_type = description[2] - return module_file, module_path, module_type is imp.PKG_DIRECTORY + try: + module_file, module_path, description = imp.find_module(string, path) + module_type = description[2] + return module_file, module_path, module_type is imp.PKG_DIRECTORY + except ImportError: + pass + + if path is None: + path = sys.path + for item in path: + loader = pkgutil.get_importer(item) + if loader: + try: + loader = loader.find_module(string) + if loader: + is_package = loader.is_package(string) + is_archive = hasattr(loader, 'archive') + try: + module_path = loader.get_filename(string) + except AttributeError: + # fallback for py26 + try: + module_path = loader._get_filename(string) + except AttributeError: + continue + if is_package: + module_path = os.path.dirname(module_path) + if is_archive: + module_path = loader.archive + file = None + if not is_package or is_archive: + file = DummyFile(loader, string) + return (file, module_path, is_package) + except ImportError: + pass + raise ImportError("No module named {0}".format(string)) find_module = find_module_py33 if is_py33 else find_module_pre_py33 diff --git a/jedi/api/completion.py b/jedi/api/completion.py index a580fe9d..0602d489 100644 --- a/jedi/api/completion.py +++ b/jedi/api/completion.py @@ -80,7 +80,7 @@ class Completion: self._code_lines 
= code_lines # The first step of completions is to get the name - self._like_name = helpers.get_on_completion_name(code_lines, position) + self._like_name = helpers.get_on_completion_name(module, code_lines, position) # The actual cursor position is not what we need to calculate # everything. We want the start of the name we're on. self._position = position[0], position[1] - len(self._like_name) diff --git a/jedi/api/helpers.py b/jedi/api/helpers.py index a174eed6..adb9564e 100644 --- a/jedi/api/helpers.py +++ b/jedi/api/helpers.py @@ -10,6 +10,7 @@ from jedi.evaluate.helpers import call_of_leaf from jedi import parser from jedi.parser import tokenize, token from jedi.cache import time_cache +from jedi import common CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) @@ -20,12 +21,18 @@ def sorted_definitions(defs): return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) -def get_on_completion_name(lines, position): - line = lines[position[0] - 1] - # The first step of completions is to get the name - return re.search( - r'(?!\d)\w+$|$', line[:position[1]] - ).group(0) +def get_on_completion_name(module, lines, position): + leaf = module.get_leaf_for_position(position) + if leaf is None or leaf.type in ('string', 'error_leaf'): + # Completions inside strings are a bit special, we need to parse the + # string. The same is true for comments and error_leafs. + line = lines[position[0] - 1] + # The first step of completions is to get the name + return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0) + elif leaf.type not in ('name', 'keyword'): + return '' + + return leaf.value[:position[1] - leaf.start_pos[1]] def _get_code(code_lines, start_pos, end_pos): @@ -44,71 +51,107 @@ class OnErrorLeaf(Exception): return self.args[0] +def _is_on_comment(leaf, position): + # We might be on a comment. + if leaf.type == 'endmarker': + try: + dedent = leaf.get_previous_leaf() + if dedent.type == 'dedent' and dedent.prefix: + # TODO This is needed because the fast parser uses multiple + # endmarker tokens within a file which is obviously ugly. + # This is so ugly that I'm not even commenting how it exactly + # happens, but let me tell you that I want to get rid of it. + leaf = dedent + except IndexError: + pass + + comment_lines = common.splitlines(leaf.prefix) + difference = leaf.start_pos[0] - position[0] + prefix_start_pos = leaf.get_start_pos_of_prefix() + if difference == 0: + indent = leaf.start_pos[1] + elif position[0] == prefix_start_pos[0]: + indent = prefix_start_pos[1] + else: + indent = 0 + line = comment_lines[-difference - 1][:position[1] - indent] + return '#' in line + + +def _get_code_for_stack(code_lines, module, position): + leaf = module.get_leaf_for_position(position, include_prefixes=True) + # It might happen that we're on whitespace or on a comment. This means + # that we would not get the right leaf. + if leaf.start_pos >= position: + if _is_on_comment(leaf, position): + return u('') + + # If we're not on a comment simply get the previous leaf and proceed. + try: + leaf = leaf.get_previous_leaf() + except IndexError: + return u('') # At the beginning of the file. + + is_after_newline = leaf.type == 'newline' + while leaf.type == 'newline': + try: + leaf = leaf.get_previous_leaf() + except IndexError: + return u('') + + if leaf.type in ('indent', 'dedent'): + return u('') + elif leaf.type == 'error_leaf' or leaf.type == 'string': + # Error leafs cannot be parsed, completion in strings is also + # impossible. 
+ raise OnErrorLeaf(leaf) + else: + if leaf == ';': + user_stmt = leaf.parent + else: + user_stmt = leaf.get_definition() + if user_stmt.parent.type == 'simple_stmt': + user_stmt = user_stmt.parent + + if is_after_newline: + if user_stmt.start_pos[1] > position[1]: + # This means that it's actually a dedent and that means that we + # start without context (part of a suite). + return u('') + + # This is basically getting the relevant lines. + return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position) + + def get_stack_at_position(grammar, code_lines, module, pos): """ Returns the possible node names (e.g. import_from, xor_test or yield_stmt). """ - user_stmt = module.get_statement_for_position(pos) - - if user_stmt is not None and user_stmt.type in ('indent', 'dedent'): - code = u('') - else: - if user_stmt is None: - user_stmt = module.get_leaf_for_position(pos, include_prefixes=True) - if pos <= user_stmt.start_pos: - try: - leaf = user_stmt.get_previous_leaf() - except IndexError: - pass - else: - user_stmt = module.get_statement_for_position(leaf.start_pos) - - if user_stmt.type == 'error_leaf' or user_stmt.type == 'string': - # Error leafs cannot be parsed, completion in strings is also - # impossible. - raise OnErrorLeaf(user_stmt) - - start_pos = user_stmt.start_pos - if user_stmt.first_leaf() == '@': - # TODO this once again proves that just using user_stmt.get_code - # would probably be nicer than _get_code. - # Remove the indent to have a statement that is aligned (properties - # on the same line as function) - start_pos = start_pos[0], 0 - - code = _get_code(code_lines, start_pos, pos) - if code == ';': - # ; cannot be parsed. - code = u('') - - # Remove whitespace at the end. Necessary, because the tokenizer will parse - # an error token (there's no new line at the end in our case). This doesn't - # alter any truth about the valid tokens at that position. - code = code.rstrip('\t ') - # Remove as many indents from **all** code lines as possible. - code = dedent(code) - class EndMarkerReached(Exception): pass def tokenize_without_endmarker(code): tokens = tokenize.source_tokens(code, use_exact_op_types=True) for token_ in tokens: - if token_[0] == token.ENDMARKER: + if token_.string == safeword: raise EndMarkerReached() - elif token_[0] == token.DEDENT: - # Ignore those. Error statements should not contain them, if - # they do it's for cases where an indentation happens and - # before the endmarker we still see them. - pass else: yield token_ - p = parser.Parser(grammar, code, start_parsing=False) + code = _get_code_for_stack(code_lines, module, pos) + # We use a word to tell Jedi when we have reached the start of the + # completion. + # Use Z as a prefix because it's not part of a number suffix. + safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' + # Remove as many indents from **all** code lines as possible. + code = code + safeword + + p = parser.ParserWithRecovery(grammar, code, start_parsing=False) try: p.parse(tokenizer=tokenize_without_endmarker(code)) except EndMarkerReached: return Stack(p.stack) + raise SystemError("This really shouldn't happen. 
There's a bug in Jedi.") class Stack(list): diff --git a/jedi/common.py b/jedi/common.py index 3655c0bd..ae10f423 100644 --- a/jedi/common.py +++ b/jedi/common.py @@ -128,8 +128,8 @@ def source_to_unicode(source, encoding=None): # UTF-8 byte-order mark return 'utf-8' - first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) - possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", + first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0) + possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) @@ -141,8 +141,11 @@ def source_to_unicode(source, encoding=None): # only cast str/bytes return source + encoding = detect_encoding() + if not isinstance(encoding, unicode): + encoding = unicode(encoding, 'utf-8', 'replace') # cast to unicode by default - return unicode(source, detect_encoding(), 'replace') + return unicode(source, encoding, 'replace') def splitlines(string): @@ -158,3 +161,9 @@ def splitlines(string): def unite(iterable): """Turns a two dimensional array into a one dimensional.""" return set(chain.from_iterable(iterable)) + + +def to_list(func): + def wrapper(*args, **kwargs): + return list(func(*args, **kwargs)) + return wrapper diff --git a/jedi/debug.py b/jedi/debug.py index e67cec27..692f9c82 100644 --- a/jedi/debug.py +++ b/jedi/debug.py @@ -86,14 +86,9 @@ def increase_indent(func): def dbg(message, *args, **kwargs): """ Looks at the stack, to see if a debug message should be printed. """ - if kwargs: - # Python 2 compatibility, because it doesn't understand default args - # after *args. - color = kwargs.get('color') - if color is None: - raise TypeError("debug.dbg doesn't support more named arguments than color") - else: - color = 'GREEN' + # Python 2 compatibility, because it doesn't understand default args + color = kwargs.pop('color', 'GREEN') + assert color if debug_function and enable_notice: frm = inspect.stack()[1] @@ -104,10 +99,15 @@ def dbg(message, *args, **kwargs): debug_function(color, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) -def warning(message, *args): +def warning(message, *args, **kwargs): + format = kwargs.pop('format', True) + assert not kwargs + if debug_function and enable_warning: i = ' ' * _debug_indent - debug_function('RED', i + 'warning: ' + message % tuple(u(repr(a)) for a in args)) + if format: + message = message % tuple(u(repr(a)) for a in args) + debug_function('RED', i + 'warning: ' + message) def speed(name): diff --git a/jedi/evaluate/__init__.py b/jedi/evaluate/__init__.py index 0ca47aee..37e0cd1c 100644 --- a/jedi/evaluate/__init__.py +++ b/jedi/evaluate/__init__.py @@ -275,7 +275,7 @@ class Evaluator(object): return self._eval_element_not_cached(element) return self._eval_element_cached(element) - @memoize_default(evaluator_is_first_arg=True) + @memoize_default(default=set(), evaluator_is_first_arg=True) def _eval_element_cached(self, element): return self._eval_element_not_cached(element) diff --git a/jedi/evaluate/analysis.py b/jedi/evaluate/analysis.py index 7b4b0acc..407bc7da 100644 --- a/jedi/evaluate/analysis.py +++ b/jedi/evaluate/analysis.py @@ -91,7 +91,7 @@ def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None): module_path = jedi_obj.get_parent_until().path instance = typ(name, module_path, jedi_obj.start_pos, message) - debug.warning(str(instance)) + debug.warning(str(instance), format=False) evaluator.analysis.append(instance) diff --git a/jedi/evaluate/compiled/fake.py 
b/jedi/evaluate/compiled/fake.py index 54785acf..9a422c11 100644 --- a/jedi/evaluate/compiled/fake.py +++ b/jedi/evaluate/compiled/fake.py @@ -156,7 +156,7 @@ def get_faked(module, obj, name=None): doc = '"""%s"""' % obj.__doc__ # TODO need escapes. suite = result.children[-1] string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') - new_line = pt.Whitespace('\n', (0, 0), '') + new_line = pt.Newline('\n', (0, 0), '') docstr_node = pt.Node('simple_stmt', [string, new_line]) suite.children.insert(2, docstr_node) return result diff --git a/jedi/evaluate/docstrings.py b/jedi/evaluate/docstrings.py index 3561f619..d2ab34ed 100644 --- a/jedi/evaluate/docstrings.py +++ b/jedi/evaluate/docstrings.py @@ -21,6 +21,7 @@ from textwrap import dedent from jedi.evaluate.cache import memoize_default from jedi.parser import ParserWithRecovery, load_grammar +from jedi.parser.tree import Class from jedi.common import indent_block from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated @@ -174,13 +175,21 @@ def _execute_array_values(evaluator, array): @memoize_default(None, evaluator_is_first_arg=True) def follow_param(evaluator, param): + def eval_docstring(docstring): + return set( + [p for param_str in _search_param_in_docstr(docstring, str(param.name)) + for p in _evaluate_for_statement_string(evaluator, param_str, module)] + ) func = param.parent_function + module = param.get_parent_until() - return set( - [p for param_str in _search_param_in_docstr(func.raw_doc, - str(param.name)) - for p in _evaluate_for_statement_string(evaluator, param_str, - param.get_parent_until())]) + types = eval_docstring(func.raw_doc) + if func.name.value == '__init__': + cls = func.get_parent_until(Class) + if cls.type == 'classdef': + types |= eval_docstring(cls.raw_doc) + + return types @memoize_default(None, evaluator_is_first_arg=True) diff --git a/jedi/evaluate/helpers.py b/jedi/evaluate/helpers.py index b13e8dcb..27293eda 100644 --- a/jedi/evaluate/helpers.py +++ b/jedi/evaluate/helpers.py @@ -26,8 +26,8 @@ def deep_ast_copy(obj, parent=None, new_elements=None): new_children = [] for child in obj.children: typ = child.type - if typ in ('whitespace', 'operator', 'keyword', 'number', 'string', - 'indent', 'dedent', 'error_leaf'): + if typ in ('newline', 'operator', 'keyword', 'number', 'string', + 'indent', 'dedent', 'endmarker', 'error_leaf'): # At the moment we're not actually copying those primitive # elements, because there's really no need to. The parents are # obviously wrong, but that's not an issue. diff --git a/jedi/evaluate/imports.py b/jedi/evaluate/imports.py index 9f5842bd..d65d897b 100644 --- a/jedi/evaluate/imports.py +++ b/jedi/evaluate/imports.py @@ -68,56 +68,49 @@ class ImportWrapper(tree.Base): @memoize_default() def follow(self, is_goto=False): - if self._evaluator.recursion_detector.push_stmt(self._import): - # check recursion - return set() - + module = self._evaluator.wrap(self._import.get_parent_until()) + import_path = self._import.path_for_name(self._name) + from_import_name = None try: - module = self._evaluator.wrap(self._import.get_parent_until()) - import_path = self._import.path_for_name(self._name) - from_import_name = None - try: - from_names = self._import.get_from_names() - except AttributeError: - # Is an import_name - pass - else: - if len(from_names) + 1 == len(import_path): - # We have to fetch the from_names part first and then check - # if from_names exists in the modules. 
- from_import_name = import_path[-1] - import_path = from_names + from_names = self._import.get_from_names() + except AttributeError: + # Is an import_name + pass + else: + if len(from_names) + 1 == len(import_path): + # We have to fetch the from_names part first and then check + # if from_names exists in the modules. + from_import_name = import_path[-1] + import_path = from_names - importer = Importer(self._evaluator, tuple(import_path), - module, self._import.level) + importer = Importer(self._evaluator, tuple(import_path), + module, self._import.level) - types = importer.follow() + types = importer.follow() - #if self._import.is_nested() and not self.nested_resolve: - # scopes = [NestedImportModule(module, self._import)] + #if self._import.is_nested() and not self.nested_resolve: + # scopes = [NestedImportModule(module, self._import)] - if from_import_name is not None: - types = set(chain.from_iterable( - self._evaluator.find_types(t, unicode(from_import_name), - is_goto=is_goto) - for t in types)) + if from_import_name is not None: + types = set(chain.from_iterable( + self._evaluator.find_types(t, unicode(from_import_name), + is_goto=is_goto) + for t in types)) - if not types: - path = import_path + [from_import_name] - importer = Importer(self._evaluator, tuple(path), - module, self._import.level) - types = importer.follow() - # goto only accepts `Name` - if is_goto: - types = set(s.name for s in types) - else: + if not types: + path = import_path + [from_import_name] + importer = Importer(self._evaluator, tuple(path), + module, self._import.level) + types = importer.follow() # goto only accepts `Name` if is_goto: types = set(s.name for s in types) + else: + # goto only accepts `Name` + if is_goto: + types = set(s.name for s in types) - debug.dbg('after import: %s', types) - finally: - self._evaluator.recursion_detector.pop_stmt() + debug.dbg('after import: %s', types) return types @@ -285,20 +278,17 @@ class Importer(object): # We can take the first element, because only the os special # case yields multiple modules, which is not important for # further imports. - base = list(bases)[0] + parent_module = list(bases)[0] # This is a huge exception, we follow a nested import # ``os.path``, because it's a very important one in Python # that is being achieved by messing with ``sys.modules`` in # ``os``. if [str(i) for i in import_path] == ['os', 'path']: - return self._evaluator.find_types(base, 'path') + return self._evaluator.find_types(parent_module, 'path') try: - # It's possible that by giving it always the sys path (and not - # the __path__ attribute of the parent, we get wrong results - # and nested namespace packages don't work. But I'm not sure. - paths = base.py__path__(sys_path) + paths = parent_module.py__path__() except AttributeError: # The module is not a package. _add_error(self._evaluator, import_path[-1]) @@ -318,6 +308,7 @@ class Importer(object): _add_error(self._evaluator, import_path[-1]) return set() else: + parent_module = None try: debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) # Override the sys.path. It works only good that way. @@ -337,15 +328,18 @@ class Importer(object): if is_pkg: # In this case, we don't have a file yet. Search for the # __init__ file. 
- module_path = get_init_path(module_path) + if module_path.endswith(('.zip', '.egg')): + source = module_file.loader.get_source(module_name) + else: + module_path = get_init_path(module_path) elif module_file: source = module_file.read() module_file.close() - if module_file is None and not module_path.endswith('.py'): + if module_file is None and not module_path.endswith(('.py', '.zip', '.egg')): module = compiled.load_module(self._evaluator, module_path) else: - module = _load_module(self._evaluator, module_path, source, sys_path) + module = _load_module(self._evaluator, module_path, source, sys_path, parent_module) if module is None: # The file might raise an ImportError e.g. and therefore not be @@ -408,7 +402,7 @@ class Importer(object): # namespace packages if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'): - paths = scope.py__path__(self.sys_path_with_modifications()) + paths = scope.py__path__() names += self._get_module_names(paths) if only_modules: @@ -441,10 +435,10 @@ class Importer(object): return names -def _load_module(evaluator, path=None, source=None, sys_path=None): +def _load_module(evaluator, path=None, source=None, sys_path=None, parent_module=None): def load(source): dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith('.py') \ + if path is not None and path.endswith(('.py', '.zip', '.egg')) \ and dotted_path not in settings.auto_import_modules: if source is None: with open(path, 'rb') as f: @@ -454,7 +448,8 @@ def _load_module(evaluator, path=None, source=None, sys_path=None): p = path p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p) save_parser(path, p) - return p.module + from jedi.evaluate.representation import ModuleWrapper + return ModuleWrapper(evaluator, p.module, parent_module) if sys_path is None: sys_path = evaluator.sys_path diff --git a/jedi/evaluate/iterable.py b/jedi/evaluate/iterable.py index 3bbd7141..79a1a0e2 100644 --- a/jedi/evaluate/iterable.py +++ b/jedi/evaluate/iterable.py @@ -30,6 +30,7 @@ from jedi.evaluate import helpers from jedi.evaluate.cache import CachedMetaClass, memoize_default from jedi.evaluate import analysis from jedi.evaluate import pep0484 +from jedi import common class IterableWrapper(tree.Base): @@ -180,36 +181,44 @@ class Comprehension(IterableWrapper): """ comp_for = self._get_comp_for() # For nested comprehensions we need to search the last one. + from jedi.evaluate.representation import InstanceElement + node = self._get_comprehension().children[index] + if isinstance(node, InstanceElement): + # This seems to be a strange case that I haven't found a way to + # write tests against. However since it's my new goal to get rid of + # InstanceElement anyway, I don't care. 
+ node = node.var last_comp = list(comp_for.get_comp_fors())[-1] - return helpers.deep_ast_copy(self._get_comprehension().children[index], parent=last_comp) - - @memoize_default() - def _iterate(self): - def nested(comp_fors): - comp_for = comp_fors[0] - input_node = comp_for.children[3] - input_types = evaluator.eval_element(input_node) - - iterated = py__iter__(evaluator, input_types, input_node) - exprlist = comp_for.children[1] - for types in iterated: - evaluator.predefined_if_name_dict_dict[comp_for] = \ - unpack_tuple_to_dict(evaluator, types, exprlist) - try: - for result in nested(comp_fors[1:]): - yield result - except IndexError: - iterated = evaluator.eval_element(self._eval_node()) - if self.type == 'dict': - yield iterated, evaluator.eval_element(self._eval_node(2)) - else: - yield iterated - finally: - del evaluator.predefined_if_name_dict_dict[comp_for] + return helpers.deep_ast_copy(node, parent=last_comp) + def _nested(self, comp_fors): evaluator = self._evaluator - comp_fors = list(self._get_comp_for().get_comp_fors()) - for result in nested(comp_fors): + comp_for = comp_fors[0] + input_node = comp_for.children[3] + input_types = evaluator.eval_element(input_node) + + iterated = py__iter__(evaluator, input_types, input_node) + exprlist = comp_for.children[1] + for i, types in enumerate(iterated): + evaluator.predefined_if_name_dict_dict[comp_for] = \ + unpack_tuple_to_dict(evaluator, types, exprlist) + try: + for result in self._nested(comp_fors[1:]): + yield result + except IndexError: + iterated = evaluator.eval_element(self._eval_node()) + if self.type == 'dict': + yield iterated, evaluator.eval_element(self._eval_node(2)) + else: + yield iterated + finally: + del evaluator.predefined_if_name_dict_dict[comp_for] + + @memoize_default(default=[]) + @common.to_list + def _iterate(self): + comp_fors = tuple(self._get_comp_for().get_comp_fors()) + for result in self._nested(comp_fors): yield result def py__iter__(self): @@ -252,7 +261,7 @@ class ArrayMixin(object): @register_builtin_method('values', type='dict') def _imitate_values(self): items = self.dict_values() - return create_evaluated_sequence_set(self._evaluator, items, type='list') + return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list') #return set([FakeSequence(self._evaluator, [AlreadyEvaluated(items)], 'tuple')]) @register_builtin_method('items', type='dict') @@ -260,7 +269,7 @@ class ArrayMixin(object): items = [set([FakeSequence(self._evaluator, (k, v), 'tuple')]) for k, v in self._items()] - return create_evaluated_sequence_set(self._evaluator, *items, type='list') + return create_evaluated_sequence_set(self._evaluator, *items, sequence_type='list') class ListComprehension(Comprehension, ArrayMixin): @@ -268,7 +277,14 @@ class ListComprehension(Comprehension, ArrayMixin): def py__getitem__(self, index): all_types = list(self.py__iter__()) - return all_types[index] + result = all_types[index] + if isinstance(index, slice): + return create_evaluated_sequence_set( + self._evaluator, + unite(result), + sequence_type='list' + ) + return result class SetComprehension(Comprehension, ArrayMixin): @@ -303,9 +319,7 @@ class DictComprehension(Comprehension, ArrayMixin): (AlreadyEvaluated(keys), AlreadyEvaluated(values)), 'tuple') for keys, values in self._iterate()) - return create_evaluated_sequence_set(self._evaluator, items, type='list') - - + return create_evaluated_sequence_set(self._evaluator, items, sequence_type='list') class GeneratorComprehension(Comprehension, GeneratorMixin): @@ 
-446,7 +460,9 @@ def create_evaluated_sequence_set(evaluator, *types_order, **kwargs): ``sequence_type`` is a named argument, that doesn't work in Python2. For backwards compatibility reasons, we're now using kwargs. """ - sequence_type = kwargs.get('sequence_type') + sequence_type = kwargs.pop('sequence_type') + assert not kwargs + sets = tuple(AlreadyEvaluated(types) for types in types_order) return set([FakeSequence(evaluator, sets, sequence_type)]) @@ -676,7 +692,7 @@ def _check_array_additions(evaluator, compare_array, module, is_list): # Arguments([AlreadyEvaluated([_ArrayInstance])]) inside # Yeah... I know... It's complicated ;-) node = list(element.var_args.argument_node[0])[0].var_args.trailer - if isinstance(node, er.InstanceElement): + if isinstance(node, er.InstanceElement) or node is None: return node return node.get_parent_until(er.FunctionExecution) @@ -729,11 +745,12 @@ def _check_array_additions(evaluator, compare_array, module, is_list): # Check for recursion. Possible by using 'extend' in # combination with function calls. continue - if compare_array in evaluator.eval_element(power): - # The arrays match. Now add the results - added_types |= check_additions(execution_trailer.children[1], add_name) - - evaluator.recursion_detector.pop_stmt() + try: + if compare_array in evaluator.eval_element(power): + # The arrays match. Now add the results + added_types |= check_additions(execution_trailer.children[1], add_name) + finally: + evaluator.recursion_detector.pop_stmt() # reset settings settings.dynamic_params_for_other_modules = temp_param_add debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA') @@ -777,6 +794,8 @@ class _ArrayInstance(IterableWrapper): yield types module = self.var_args.get_parent_until() + if module is None: + return is_list = str(self.instance.name) == 'list' additions = _check_array_additions(self._evaluator, self.instance, module, is_list) if additions: diff --git a/jedi/evaluate/recursion.py b/jedi/evaluate/recursion.py index a95ba3e0..9b95fad5 100644 --- a/jedi/evaluate/recursion.py +++ b/jedi/evaluate/recursion.py @@ -15,7 +15,6 @@ from jedi.evaluate import iterable def recursion_decorator(func): def run(evaluator, stmt, *args, **kwargs): rec_detect = evaluator.recursion_detector - # print stmt, len(self.node_statements()) if rec_detect.push_stmt(stmt): return set() else: diff --git a/jedi/evaluate/representation.py b/jedi/evaluate/representation.py index a95e5451..4502d10f 100644 --- a/jedi/evaluate/representation.py +++ b/jedi/evaluate/representation.py @@ -472,7 +472,10 @@ class Class(use_metaclass(CachedMetaClass, Wrapper)): @property def params(self): - return self.get_subscope_by_name('__init__').params + try: + return self.get_subscope_by_name('__init__').params + except KeyError: + return [] # object.__init__ def names_dicts(self, search_global, is_instance=False): if search_global: @@ -491,7 +494,7 @@ class Class(use_metaclass(CachedMetaClass, Wrapper)): for s in self.py__mro__(): for sub in reversed(s.subscopes): if sub.name.value == name: - return sub + return sub raise KeyError("Couldn't find subscope.") def __getattr__(self, name): @@ -803,9 +806,10 @@ class GlobalName(helpers.FakeName): class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): - def __init__(self, evaluator, module): + def __init__(self, evaluator, module, parent_module=None): self._evaluator = evaluator self.base = self._module = module + self._parent_module = parent_module def names_dicts(self, search_global): yield 
self.base.names_dict @@ -851,6 +855,10 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): return helpers.FakeName(unicode(self.base.name), self, (1, 0)) def _get_init_directory(self): + """ + :return: The path to the directory of a package. None in case it's not + a package. + """ for suffix, _, _ in imp.get_suffixes(): ending = '__init__' + suffix py__file__ = self.py__file__() @@ -881,6 +889,30 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): else: return self.py__name__() + def _py__path__(self): + if self._parent_module is None: + search_path = self._evaluator.sys_path + else: + search_path = self._parent_module.py__path__() + init_path = self.py__file__() + if os.path.basename(init_path) == '__init__.py': + with open(init_path, 'rb') as f: + content = common.source_to_unicode(f.read()) + # these are strings that need to be used for namespace packages, + # the first one is ``pkgutil``, the second ``pkg_resources``. + options = ('declare_namespace(__name__)', 'extend_path(__path__') + if options[0] in content or options[1] in content: + # It is a namespace, now try to find the rest of the + # modules on sys_path or whatever the search_path is. + paths = set() + for s in search_path: + other = os.path.join(s, unicode(self.name)) + if os.path.isdir(other): + paths.add(other) + return list(paths) + # Default to this. + return [self._get_init_directory()] + @property def py__path__(self): """ @@ -893,33 +925,12 @@ class ModuleWrapper(use_metaclass(CachedMetaClass, tree.Module, Wrapper)): is a list of paths (strings). Raises an AttributeError if the module is not a package. """ - def return_value(search_path): - init_path = self.py__file__() - if os.path.basename(init_path) == '__init__.py': - - with open(init_path, 'rb') as f: - content = common.source_to_unicode(f.read()) - # these are strings that need to be used for namespace packages, - # the first one is ``pkgutil``, the second ``pkg_resources``. - options = ('declare_namespace(__name__)', 'extend_path(__path__') - if options[0] in content or options[1] in content: - # It is a namespace, now try to find the rest of the - # modules on sys_path or whatever the search_path is. - paths = set() - for s in search_path: - other = os.path.join(s, unicode(self.name)) - if os.path.isdir(other): - paths.add(other) - return list(paths) - # Default to this. 
- return [path] - path = self._get_init_directory() if path is None: raise AttributeError('Only packages have __path__ attributes.') else: - return return_value + return self._py__path__ @memoize_default() def _sub_modules_dict(self): diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py index 8fb915a5..87e38ae9 100644 --- a/jedi/parser/__init__.py +++ b/jedi/parser/__init__.py @@ -214,12 +214,14 @@ class Parser(object): return pt.String(self.position_modifier, value, start_pos, prefix) elif type == NUMBER: return pt.Number(self.position_modifier, value, start_pos, prefix) - elif type in (NEWLINE, ENDMARKER): - return pt.Whitespace(self.position_modifier, value, start_pos, prefix) + elif type == NEWLINE: + return pt.Newline(self.position_modifier, value, start_pos, prefix) elif type == INDENT: return pt.Indent(self.position_modifier, value, start_pos, prefix) elif type == DEDENT: return pt.Dedent(self.position_modifier, value, start_pos, prefix) + elif type == ENDMARKER: + return pt.EndMarker(self.position_modifier, value, start_pos, prefix) else: return pt.Operator(self.position_modifier, value, start_pos, prefix) @@ -287,7 +289,8 @@ class ParserWithRecovery(Parser): :param module_path: The path of the module in the file system, may be None. :type module_path: str """ - def __init__(self, grammar, source, module_path=None, tokenizer=None): + def __init__(self, grammar, source, module_path=None, tokenizer=None, + start_parsing=True): self.syntax_errors = [] self._omit_dedent_list = [] @@ -302,12 +305,16 @@ class ParserWithRecovery(Parser): # if self.options["print_function"]: # python_grammar = pygram.python_grammar_no_print_statement # else: - super(ParserWithRecovery, self).__init__(grammar, source, tokenizer=tokenizer) - - self.module = self._parsed - self.module.used_names = self._used_names - self.module.path = module_path - self.module.global_names = self._global_names + super(ParserWithRecovery, self).__init__( + grammar, source, + tokenizer=tokenizer, + start_parsing=start_parsing + ) + if start_parsing: + self.module = self._parsed + self.module.used_names = self._used_names + self.module.path = module_path + self.module.global_names = self._global_names def parse(self, tokenizer): return super(ParserWithRecovery, self).parse(self._tokenize(self._tokenize(tokenizer))) diff --git a/jedi/parser/fast.py b/jedi/parser/fast.py index 520b9d81..5c244402 100644 --- a/jedi/parser/fast.py +++ b/jedi/parser/fast.py @@ -120,6 +120,10 @@ class ParserNode(object): self.source = source self.hash = hash(source) self.parser = parser + if source: + self._end_pos = parser.module.end_pos + else: + self._end_pos = 1, 0 try: # With fast_parser we have either 1 subscope or only statements. @@ -162,6 +166,10 @@ class ParserNode(object): # There's no module yet. return '<%s: empty>' % type(self).__name__ + @property + def end_pos(self): + return self._end_pos[0] + self.parser.position_modifier.line, self._end_pos[1] + def reset_node(self): """ Removes changes that were applied in this class. @@ -188,6 +196,10 @@ class ParserNode(object): # Need to insert the own node as well. 
dcts.insert(0, self._content_scope.names_dict) self._content_scope.names_dict = MergedNamesDict(dcts) + endmarker = self.parser.get_parsed_node().children[-1] + assert endmarker.type == 'endmarker' + last_parser = self._node_children[-1].parser + endmarker.start_pos = last_parser.get_parsed_node().end_pos @property def _indent(self): @@ -414,7 +426,7 @@ class FastParser(use_metaclass(CachedFastParser)): # called - just ignore it. src = ''.join(self._lines[code_part_end_line - 1:]) self._parse_part(code_part, src, code_part_end_line, nodes) - last_end_line = self.current_node.parser.module.end_pos[0] + last_end_line = self.current_node.end_pos[0] debug.dbg("While parsing %s, starting with line %s wasn't included in split.", self.module_path, code_part_end_line) #assert code_part_end_line > last_end_line @@ -426,7 +438,7 @@ class FastParser(use_metaclass(CachedFastParser)): code_part_end_line = next_code_part_end_line start += len(code_part) - last_end_line = self.current_node.parser.module.end_pos[0] + last_end_line = self.current_node.end_pos[0] if added_newline: self.current_node.remove_last_newline() diff --git a/jedi/parser/tree.py b/jedi/parser/tree.py index 6013f36a..c5a2d554 100644 --- a/jedi/parser/tree.py +++ b/jedi/parser/tree.py @@ -147,10 +147,12 @@ class Base(object): return scope def get_definition(self): + if self.type in ('newline', 'dedent', 'indent', 'endmarker'): + raise ValueError('Cannot get the indentation of whitespace or indentation.') scope = self while scope.parent is not None: parent = scope.parent - if scope.isinstance(Node, Name) and parent.type != 'simple_stmt': + if scope.isinstance(Node, Leaf) and parent.type != 'simple_stmt': if scope.type == 'testlist_comp': try: if isinstance(scope.children[1], CompFor): @@ -292,7 +294,11 @@ class Leaf(Base): def get_start_pos_of_prefix(self): try: - return self.get_previous_leaf().end_pos + previous_leaf = self + while True: + previous_leaf = previous_leaf.get_previous_leaf() + if previous_leaf.type not in ('indent', 'dedent'): + return previous_leaf.end_pos except IndexError: return 1, 0 # It's the first leaf. @@ -348,10 +354,15 @@ class LeafWithNewLines(Leaf): return "<%s: %r>" % (type(self).__name__, self.value) -class Whitespace(LeafWithNewLines): +class EndMarker(Leaf): + __slots__ = () + type = 'endmarker' + + +class Newline(LeafWithNewLines): """Contains NEWLINE and ENDMARKER tokens.""" __slots__ = () - type = 'whitespace' + type = 'newline' @utf8_repr def __repr__(self): @@ -421,7 +432,7 @@ class Indent(Leaf): class Dedent(Leaf): - type = 'indent' + type = 'dedent' __slots__ = () @@ -539,6 +550,10 @@ class BaseNode(Base): try: return c.get_leaf_for_position(position, include_prefixes) except AttributeError: + while c.type in ('indent', 'dedent'): + # We'd rather not have indents and dedents as a leaf, + # because they don't contain indentation information. 
+ c = c.get_next_leaf() return c return None diff --git a/jedi/utils.py b/jedi/utils.py index 848d8571..2502620c 100644 --- a/jedi/utils.py +++ b/jedi/utils.py @@ -74,7 +74,7 @@ def setup_readline(namespace_module=__main__): lines = common.splitlines(text) position = (len(lines), len(lines[-1])) - name = get_on_completion_name(lines, position) + name = get_on_completion_name(interpreter._get_module(), lines, position) before = text[:len(text) - len(name)] completions = interpreter.completions() finally: diff --git a/test/completion/arrays.py b/test/completion/arrays.py index 938b9fdf..dcbbe379 100644 --- a/test/completion/arrays.py +++ b/test/completion/arrays.py @@ -311,6 +311,17 @@ for i in 0, 2: #? int() str() GetItemWithList()[i] + +# With super +class SuperYeah(list): + def __getitem__(self, index): + return super()[index] + +#? +SuperYeah([1])[0] +#? +SuperYeah()[0] + # ----------------- # conversions # ----------------- @@ -368,20 +379,6 @@ for i in set(a for a in [1]): i -# ----------------- -# Recursions -# ----------------- - -def to_list(iterable): - return list(set(iterable)) - - -def recursion1(foo): - return to_list(to_list(foo)) + recursion1(foo) - -#? int() -recursion1([1,2])[0] - # ----------------- # Merged Arrays # ----------------- diff --git a/test/completion/basic.py b/test/completion/basic.py index 9b98fbaf..5b4f9091 100644 --- a/test/completion/basic.py +++ b/test/completion/basic.py @@ -190,6 +190,16 @@ def a(): #? # str literals in comment """ upper +def completion_in_comment(): + #? ['Exception'] + # might fail because the comment is not a leaf: Exception + pass + +some_word +#? ['Exception'] +# Very simple comment completion: Exception +# Commment after it + # ----------------- # magic methods # ----------------- diff --git a/test/completion/classes.py b/test/completion/classes.py index 35b0619c..b4f67a1f 100644 --- a/test/completion/classes.py +++ b/test/completion/classes.py @@ -294,20 +294,6 @@ class A(): #? list() A().b() -# ----------------- -# recursions -# ----------------- -def Recursion(): - def recurse(self): - self.a = self.a - self.b = self.b.recurse() - -#? -Recursion().a - -#? -Recursion().b - # ----------------- # ducktyping # ----------------- diff --git a/test/completion/completion.py b/test/completion/completion.py new file mode 100644 index 00000000..91bc2aa5 --- /dev/null +++ b/test/completion/completion.py @@ -0,0 +1,26 @@ +""" +Special cases of completions (typically special positions that caused issues +with context parsing. +""" + +def pass_decorator(func): + return func + + +def x(): + return ( + 1, +#? ["tuple"] +tuple + ) + + # Comment just somewhere + + +class MyClass: + @pass_decorator + def x(foo, +#? 5 ["tuple"] +tuple, + ): + return 1 diff --git a/test/completion/comprehensions.py b/test/completion/comprehensions.py index 78e4f4b3..a0a709c7 100644 --- a/test/completion/comprehensions.py +++ b/test/completion/comprehensions.py @@ -173,4 +173,28 @@ def x(): [a for a in h if hio] if hio: pass +# ----------------- +# slices +# ----------------- +#? list() +foo = [x for x in [1, '']][:1] +#? int() +foo[0] + +# ----------------- +# In class +# ----------------- + +class X(): + def __init__(self, bar): + self.bar = bar + + def foo(self): + x = [a for a in self.bar][0] + #? int() + x + return x + +#? int() +X([1]).foo() diff --git a/test/completion/docstring.py b/test/completion/docstring.py index 74667191..c14188c4 100644 --- a/test/completion/docstring.py +++ b/test/completion/docstring.py @@ -177,3 +177,44 @@ d = '' """ bsdf """ #? 
str() d.upper() + +# ----------------- +# class docstrings +# ----------------- + +class InInit(): + def __init__(self, foo): + """ + :type foo: str + """ + #? str() + foo + + +class InClass(): + """ + :type foo: str + """ + def __init__(self, foo): + #? str() + foo + + +class InBoth(): + """ + :type foo: str + """ + def __init__(self, foo): + """ + :type foo: int + """ + #? str() int() + foo + + +def __init__(foo): + """ + :type foo: str + """ + #? str() + foo diff --git a/test/completion/goto.py b/test/completion/goto.py index 4e178af1..ef9938cf 100644 --- a/test/completion/goto.py +++ b/test/completion/goto.py @@ -102,6 +102,13 @@ def f(t=None): #! 9 ['t=None'] t = t or 1 + +class X(): + pass + +#! 3 [] +X(foo=x) + # ----------------- # imports # ----------------- diff --git a/test/completion/recursion.py b/test/completion/recursion.py new file mode 100644 index 00000000..77e5b24f --- /dev/null +++ b/test/completion/recursion.py @@ -0,0 +1,51 @@ +""" +Code that might cause recursion issues (or has caused in the past). +""" + +def Recursion(): + def recurse(self): + self.a = self.a + self.b = self.b.recurse() + +#? +Recursion().a + +#? +Recursion().b + + +class X(): + def __init__(self): + self.recursive = [1, 3] + + def annoying(self): + self.recursive = [self.recursive[0]] + + def recurse(self): + self.recursive = [self.recursive[1]] + +#? int() +X().recursive[0] + + +def to_list(iterable): + return list(set(iterable)) + + +def recursion1(foo): + return to_list(to_list(foo)) + recursion1(foo) + +#? int() +recursion1([1,2])[0] + + +class FooListComp(): + def __init__(self): + self.recursive = [1] + + def annoying(self): + self.recursive = [x for x in self.recursive] + + +#? int() +FooListComp().recursive[0] diff --git a/test/test_api/test_api.py b/test/test_api/test_api.py index fc961872..5658540f 100644 --- a/test/test_api/test_api.py +++ b/test/test_api/test_api.py @@ -83,7 +83,7 @@ def test_completion_on_hex_literals(): _check_number('0x1.', 'int') # hexdecimal # Completing binary literals doesn't work if they are not actually binary # (invalid statements). - assert api.Script('0b2.').completions() == [] + assert api.Script('0b2.b').completions() == [] _check_number('0b1.', 'int') # binary _check_number('0x2e.', 'int') @@ -98,8 +98,10 @@ def test_completion_on_complex_literals(): _check_number('1j.', 'complex') _check_number('44.j.', 'complex') _check_number('4.0j.', 'complex') - # No dot no completion - assert api.Script('4j').completions() == [] + # No dot no completion - I thought, but 4j is actually a literall after + # which a keyword like or is allowed. Good times, haha! 
+ assert (set([c.name for c in api.Script('4j').completions()]) == + set(['if', 'and', 'in', 'is', 'not', 'or'])) def test_goto_assignments_on_non_name(): diff --git a/test/test_api/test_completion_context.py b/test/test_api/test_completion_context.py new file mode 100644 index 00000000..fee4f0e1 --- /dev/null +++ b/test/test_api/test_completion_context.py @@ -0,0 +1,9 @@ +from textwrap import dedent +from jedi import Script + + +def test_in_whitespace(): + code = dedent(''' + def x(): + pass''') + assert len(Script(code, column=2).completions()) > 20 diff --git a/test/test_evaluate/nested_namespaces/__init__.py b/test/test_evaluate/nested_namespaces/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_evaluate/nested_namespaces/namespace/__init__.py b/test/test_evaluate/nested_namespaces/namespace/__init__.py new file mode 100644 index 00000000..42e33a76 --- /dev/null +++ b/test/test_evaluate/nested_namespaces/namespace/__init__.py @@ -0,0 +1,4 @@ +try: + __import__('pkg_resources').declare_namespace(__name__) +except ImportError: + pass diff --git a/test/test_evaluate/nested_namespaces/namespace/pkg/__init__.py b/test/test_evaluate/nested_namespaces/namespace/pkg/__init__.py new file mode 100644 index 00000000..3c378205 --- /dev/null +++ b/test/test_evaluate/nested_namespaces/namespace/pkg/__init__.py @@ -0,0 +1 @@ +CONST = 1 diff --git a/test/test_evaluate/test_imports.py b/test/test_evaluate/test_imports.py index 98c8baf2..8eb23874 100644 --- a/test/test_evaluate/test_imports.py +++ b/test/test_evaluate/test_imports.py @@ -4,7 +4,7 @@ import sys import pytest import jedi -from jedi._compatibility import find_module_py33 +from jedi._compatibility import find_module_py33, find_module from ..helpers import cwd_at @@ -14,6 +14,44 @@ def test_find_module_py33(): assert find_module_py33('_io') == (None, '_io', False) +def test_find_module_package(): + file, path, is_package = find_module('json') + assert file is None + assert path.endswith('json') + assert is_package is True + + +def test_find_module_not_package(): + file, path, is_package = find_module('io') + assert file is not None + assert path.endswith('io.py') + assert is_package is False + + +def test_find_module_package_zipped(): + if 'zipped_imports/pkg.zip' not in sys.path: + sys.path.append(os.path.join(os.path.dirname(__file__), + 'zipped_imports/pkg.zip')) + file, path, is_package = find_module('pkg') + assert file is not None + assert path.endswith('pkg.zip') + assert is_package is True + assert len(jedi.Script('import pkg; pkg.mod', 1, 19).completions()) == 1 + + +@pytest.mark.skipif('sys.version_info < (2,7)') +def test_find_module_not_package_zipped(): + if 'zipped_imports/not_pkg.zip' not in sys.path: + sys.path.append(os.path.join(os.path.dirname(__file__), + 'zipped_imports/not_pkg.zip')) + file, path, is_package = find_module('not_pkg') + assert file is not None + assert path.endswith('not_pkg.zip') + assert is_package is False + assert len( + jedi.Script('import not_pkg; not_pkg.val', 1, 27).completions()) == 1 + + @cwd_at('test/test_evaluate/not_in_sys_path/pkg') def test_import_not_in_sys_path(): """ diff --git a/test/test_evaluate/test_namespace_package.py b/test/test_evaluate/test_namespace_package.py index 79993f82..3beed65a 100644 --- a/test/test_evaluate/test_namespace_package.py +++ b/test/test_evaluate/test_namespace_package.py @@ -51,3 +51,15 @@ def test_namespace_package(): completion = c solution = "statement: foo = '%s'" % solution assert completion.description == solution + + 
+def test_nested_namespace_package(): + CODE = 'from nested_namespaces.namespace.pkg import CONST' + + sys_path = [dirname(__file__)] + + script = jedi.Script(sys_path=sys_path, source=CODE, line=1, column=45) + + result = script.goto_definitions() + + assert len(result) == 1 diff --git a/test/test_evaluate/zipped_imports/not_pkg.zip b/test/test_evaluate/zipped_imports/not_pkg.zip new file mode 100644 index 00000000..f1516a6a Binary files /dev/null and b/test/test_evaluate/zipped_imports/not_pkg.zip differ diff --git a/test/test_evaluate/zipped_imports/pkg.zip b/test/test_evaluate/zipped_imports/pkg.zip new file mode 100644 index 00000000..ec8eac4d Binary files /dev/null and b/test/test_evaluate/zipped_imports/pkg.zip differ diff --git a/test/test_regression.py b/test/test_regression.py index 5452349e..abb10727 100644 --- a/test/test_regression.py +++ b/test/test_regression.py @@ -14,6 +14,7 @@ import jedi from jedi._compatibility import u from jedi import Script from jedi import api +from jedi import common from jedi.evaluate import imports from jedi.parser import ParserWithRecovery, load_grammar @@ -179,6 +180,15 @@ class TestRegression(TestCase): else: assert n == limit + def test_source_to_unicode_unicode_text(self): + source = ( + b"# vim: fileencoding=utf-8\n" + b"# \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\n" + ) + actual = common.source_to_unicode(source) + expected = source.decode('utf-8') + assert actual == expected + def test_loading_unicode_files_with_bad_global_charset(monkeypatch, tmpdir): dirname = str(tmpdir.mkdir('jedi-test'))
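
The DummyFile helper and the zip/egg handling in find_module_py33 / find_module_pre_py33 above are easier to follow in isolation. The self-contained sketch below builds a throwaway archive and reads module source through a zipimport loader in the same spirit as the patch; the archive and module names (demo_pkg.zip, demo_mod) are made up for the example and are not part of Jedi.

import zipfile
import zipimport


class DummyFile(object):
    # Same idea as the patch's DummyFile: a file-like object whose read()
    # pulls the module source from a loader instead of an open file handle,
    # which is the only option when the module lives inside an archive.
    def __init__(self, loader, string):
        self.loader = loader
        self.string = string

    def read(self):
        return self.loader.get_source(self.string)

    def close(self):
        del self.loader


# Build a small archive so the example runs anywhere (file name is made up).
with zipfile.ZipFile('demo_pkg.zip', 'w') as zf:
    zf.writestr('demo_mod.py', 'VALUE = 42\n')

loader = zipimport.zipimporter('demo_pkg.zip')
module_file = DummyFile(loader, 'demo_mod')
print(module_file.read())              # 'VALUE = 42\n'
print(loader.is_package('demo_mod'))   # False
print(loader.archive)                  # the .zip path the patch reports as module_path
module_file.close()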
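
The reworked get_stack_at_position no longer slices statements out of the source; it appends a sentinel name (the "safeword") to the possibly incomplete code and feeds tokens to the parser until that sentinel shows up. The rough illustration below uses only the standard-library tokenizer, not Jedi's, just to show that the sentinel arrives as an ordinary NAME token even when the code in front of it is unfinished.

import io
import tokenize

SAFEWORD = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'

# Incomplete code, as it would look at the cursor during a completion.
incomplete = 'from os import (\n    path, '
code = incomplete + SAFEWORD

context_tokens = []
try:
    for tok in tokenize.generate_tokens(io.StringIO(code).readline):
        if tok.string == SAFEWORD:
            # Everything collected so far describes the grammatical context
            # at the cursor; Jedi stops parsing here and inspects the stack.
            break
        context_tokens.append(tok)
except tokenize.TokenError:
    pass  # unbalanced parentheses are expected in incomplete code

print([t.string for t in context_tokens])
# ['from', 'os', 'import', '(', '\n', 'path', ',']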
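
The common.source_to_unicode change matters because the coding-cookie detection runs on still-undecoded bytes, so both regular expressions have to be bytes patterns and the resulting encoding name has to be decoded before it is used to decode the source. A minimal sketch of that detection step follows; the helper name is illustrative only.

import re


def detect_coding_cookie(source_bytes, default='utf-8'):
    # Only the first two lines may carry a PEP 263 coding cookie, and the
    # source is still bytes at this point, hence the b'' patterns.
    first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source_bytes).group(0)
    match = re.search(br'coding[=:]\s*([-\w.]+)', first_two_lines)
    if match:
        return match.group(1).decode('utf-8', 'replace')
    return default


source = (b'# vim: fileencoding=utf-8\n'
          b'# \xe3\x81\x82\xe3\x81\x84\n')
encoding = detect_coding_cookie(source)
print(encoding)                        # utf-8
print(source.decode(encoding, 'replace'))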
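
The new ModuleWrapper._py__path__ and the parent_module plumbing through Importer and _load_module exist so that nested namespace packages resolve against the parent package's __path__ rather than sys.path. The core lookup is simple enough to sketch on its own; the function name and arguments below are illustrative, not Jedi API.

import os


def namespace_package_paths(package_name, init_path, search_path):
    # If __init__.py declares a namespace package (pkgutil's extend_path or
    # pkg_resources' declare_namespace), every directory on the search path
    # that carries the package's name contributes to __path__. Otherwise the
    # package directory itself is the only entry.
    with open(init_path, 'rb') as f:
        content = f.read().decode('utf-8', 'replace')
    markers = ('declare_namespace(__name__)', 'extend_path(__path__')
    if any(marker in content for marker in markers):
        paths = set()
        for entry in search_path:
            candidate = os.path.join(entry, package_name)
            if os.path.isdir(candidate):
                paths.add(candidate)
        return sorted(paths)
    return [os.path.dirname(init_path)]

For a top-level package the search path is sys.path; for a nested one it is the parent's py__path__() result, which is why test_nested_namespace_package needs parent_module threaded through _load_module and ModuleWrapper.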