diff --git a/jedi/api/__init__.py b/jedi/api/__init__.py
index faea9d02..84b22ad1 100644
--- a/jedi/api/__init__.py
+++ b/jedi/api/__init__.py
@@ -129,6 +129,14 @@ class Script(object):
         :rtype: list of :class:`classes.Completion`
         """
         def get_completions(user_stmt, bs):
+            if user_stmt is None:
+                module = self._parser.module()
+                importer = helpers.check_error_statements(
+                    self._evaluator, module, self._pos
+                )
+                print(importer.completion_names(self._evaluator, True))
+                return [(name, module) for name in importer.completion_names(self._evaluator, True)]
+            # TODO DELETE still needed?
             if isinstance(user_stmt, pr.Import):
                 context = self._user_context.get_context()
                 next(context)  # skip the path
@@ -155,6 +163,7 @@ class Script(object):
 
         path, dot, like = helpers.completion_parts(path)
         user_stmt = self._parser.user_stmt_with_whitespace()
+
         b = compiled.builtin
         completions = get_completions(user_stmt, b)
 
diff --git a/jedi/api/helpers.py b/jedi/api/helpers.py
index dbb9af38..d6d93abd 100644
--- a/jedi/api/helpers.py
+++ b/jedi/api/helpers.py
@@ -31,3 +31,21 @@ def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False)
 
     i = imports.ImportWrapper(evaluator, name)
     return i, name
+
+
+def check_error_statements(evaluator, module, pos):
+    for error_statement in module.error_statement_stacks:
+        if error_statement.first_type in ('import_from', 'import_name') \
+                and error_statement.first_pos < pos <= error_statement.next_start_pos:
+            return importer_from_error_statement(evaluator, module, error_statement, pos)
+    return None
+
+
+def importer_from_error_statement(evaluator, module, error_statement, pos):
+    names = []
+    level = 0
+    for typ, nodes in error_statement.stack:
+        if typ == 'dotted_name':
+            names += nodes[::2]
+
+    return imports.get_importer(evaluator, names, module, level)
diff --git a/jedi/evaluate/helpers.py b/jedi/evaluate/helpers.py
index 4026687a..25f8873e 100644
--- a/jedi/evaluate/helpers.py
+++ b/jedi/evaluate/helpers.py
@@ -318,6 +318,8 @@ def statement_elements_in_statement(stmt):
 
 class FakeSubModule():
     line_offset = 0
+    parent = None
+    path = None
 
 
 class FakeArray(pr.Array):
diff --git a/jedi/evaluate/imports.py b/jedi/evaluate/imports.py
index 10d31425..3f268cff 100644
--- a/jedi/evaluate/imports.py
+++ b/jedi/evaluate/imports.py
@@ -376,6 +376,7 @@ def get_importer(evaluator, import_path, module, level=0):
     Checks the evaluator caches first, which resembles the ``sys.modules``
     cache and speeds up libraries like ``numpy``.
     """
+    import_path = tuple(import_path)  # We use it as a hash in the import cache.
    if level != 0:
         # Only absolute imports should be cached. Otherwise we have a mess.
         # TODO Maybe calculate the absolute import and save it here?
@@ -594,6 +595,94 @@ class _Importer(object):
         else:
             return _load_module(self._evaluator, name=path, sys_path=sys_path), rest
 
+    def _generate_name(self, name):
+        return helpers.FakeName(name, parent=FakeSubModule)
+
+    def _get_module_names(self, search_path=None):
+        """
+        Get the names of all modules in the search_path. This means file names
+        and not names defined in the files.
+        """
+
+        names = []
+        # add builtin module names
+        if search_path is None:
+            names += [self._generate_name(name) for name in sys.builtin_module_names]
+
+        if search_path is None:
+            search_path = self.sys_path_with_modifications()
+        for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):
+            names.append(self._generate_name(name))
+        return names
+
+    def completion_names(self, evaluator, only_modules=False):
+        """
+        :param only_modules: Indicates whether it's possible to import a
+            definition that is not defined in a module.
+        """
+        names = []
+        if not self.import_path:  # Empty import path=completion after import
+            if not self._is_relative_import():
+                names += self._get_module_names()
+
+            if self._importer.file_path is not None:
+                path = os.path.abspath(self._importer.file_path)
+                for i in range(self.import_stmt.relative_count - 1):
+                    path = os.path.dirname(path)
+                names += self._get_module_names([path])
+
+            if self._is_relative_import():
+                rel_path = os.path.join(self._importer.get_relative_path(),
+                                        '__init__.py')
+                if os.path.exists(rel_path):
+                    m = _load_module(self._evaluator, rel_path)
+                    names += m.get_defined_names()
+
+        for scope in self.follow(evaluator):
+            # flask
+            if self.import_path == ('flask', 'ext'):
+                # List Flask extensions like ``flask_foo``
+                for mod in self._get_module_names():
+                    modname = str(mod)
+                    if modname.startswith('flask_'):
+                        extname = modname[len('flask_'):]
+                        names.append(self._generate_name(extname))
+                # Now the old style: ``flaskext.foo``
+                for dir in self._importer.sys_path_with_modifications():
+                    flaskext = os.path.join(dir, 'flaskext')
+                    if os.path.isdir(flaskext):
+                        names += self._get_module_names([flaskext])
+
+            # TODO delete
+            # namespace packages
+            if isinstance(scope, pr.Module) and scope.path.endswith('__init__.py'):
+                pkg_path = os.path.dirname(scope.path)
+                paths = self.namespace_packages(pkg_path, self.import_path)
+                names += self._get_module_names([pkg_path] + paths)
+
+            if only_modules:
+                # In the case of an import like `from x.` we don't need to
+                # add all the variables.
+                if ('os',) == self.import_path and not self._is_relative_import():
+                    # os.path is a hardcoded exception, because it's a
+                    # ``sys.modules`` modification.
+                    names.append(self._generate_name('path'))
+                continue
+
+            if False and not self.import_stmt.from_names or False and self.is_partial_import:
+                # from_names must be defined to access module
+                # values plus a partial import means that there
+                # is something after the import, which
+                # automatically implies that there must not be
+                # any non-module scope.
+                continue
+            from jedi.evaluate import finder
+            for s, scope_names in finder.get_names_of_scope(self._evaluator,
+                                                            scope, include_builtin=False):
+                for n in scope_names:
+                    names.append(n)
+        return names
+
 
 def follow_imports(evaluator, scopes):
     """
diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index 6d31505c..ce054318 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -42,6 +42,23 @@ def load_grammar(file):
     return _loaded_grammars.setdefault(path, pgen2.load_grammar(path))
 
 
+class ErrorStatement(object):
+    def __init__(self, stack, next_token, next_start_pos):
+        self.stack = stack
+        self.next_token = next_token
+        self.next_start_pos = next_start_pos
+
+    @property
+    def first_pos(self):
+        first_type, nodes = self.stack[0]
+        return nodes[0].start_pos
+
+    @property
+    def first_type(self):
+        first_type, nodes = self.stack[0]
+        return first_type
+
+
 class Parser(object):
     """
     This class is used to parse a Python file, it then divides them into a
@@ -104,7 +121,7 @@ class Parser(object):
         # and only if the refactor method's write parameter was True.
         self.used_names = {}
         self.scope_names_stack = [{}]
-        self.failed_statement_stacks = []
+        self.error_statement_stacks = []
         logger = logging.getLogger("Jedi-Parser")
         d = pgen2.Driver(grammar, self.convert_node, self.convert_leaf,
                          self.error_recovery, logger=logger)
@@ -113,6 +130,7 @@ class Parser(object):
         self.module.used_names = self.used_names
         self.module.path = module_path
         self.module.set_global_names(self.global_names)
+        self.module.error_statement_stacks = self.error_statement_stacks
         self.grammar_symbols = grammar.number2symbol
 
     def convert_node(self, grammar, type, children):
@@ -176,7 +194,7 @@ class Parser(object):
         else:
             return pt.Operator(value, start_pos, prefix)
 
-    def error_recovery(self, grammar, stack, type, value):
+    def error_recovery(self, grammar, stack, typ, value, start_pos):
         """
         This parser is written in a dynamic way, meaning that this parser
         allows using different grammars (even non-Python). However, error
@@ -191,9 +209,9 @@ class Parser(object):
                 index = i
                 break
         # No success finding a transition
-        self._stack_removal(grammar, stack, index + 1)
+        self._stack_removal(grammar, stack, index + 1, value, start_pos)
 
-    def _stack_removal(self, grammar, stack, start_index):
+    def _stack_removal(self, grammar, stack, start_index, value, start_pos):
         def clear_names(children):
             for c in children:
                 try:
@@ -214,7 +232,8 @@ class Parser(object):
             if found:
                 symbol = grammar.number2symbol[typ]
                 failed_stack.append((symbol, nodes))
-        self.failed_statement_stacks.append(failed_stack)
+        err = ErrorStatement(failed_stack, value, start_pos)
+        self.error_statement_stacks.append(err)
 
         for dfa, state, node in stack[start_index:]:
             clear_names(children=node[1])
diff --git a/jedi/parser/pgen2/parse.py b/jedi/parser/pgen2/parse.py
index 1f68830f..4a38b72e 100644
--- a/jedi/parser/pgen2/parse.py
+++ b/jedi/parser/pgen2/parse.py
@@ -141,7 +141,7 @@ class Parser(object):
                         # Done parsing, but another token is input
                         raise ParseError("too much input", type, value, start_pos)
                 else:
-                    self.error_recovery(self.grammar, self.stack, type, value)
+                    self.error_recovery(self.grammar, self.stack, type, value, start_pos)
                     break
 
     def classify(self, type, value, start_pos):
diff --git a/jedi/parser/tree.py b/jedi/parser/tree.py
index b86c08f6..1ea87819 100644
--- a/jedi/parser/tree.py
+++ b/jedi/parser/tree.py
@@ -620,7 +620,7 @@ class SubModule(Scope, Module):
     of a module.
""" __slots__ = ('path', 'global_names', 'used_names', - 'line_offset', 'use_as_parent') + 'line_offset', 'use_as_parent', 'failed_statement_stacks') def __init__(self, children): """