diff --git a/jedi/evaluate/finder.py b/jedi/evaluate/finder.py
index 8b91679b..88c35b71 100644
--- a/jedi/evaluate/finder.py
+++ b/jedi/evaluate/finder.py
@@ -65,6 +65,13 @@ class NameFinder(object):
             return iter([(self.scope, self.scope.get_magic_function_names())])
         return self.scope.scope_names_generator(self.position)
 
+    def names_dict_lookup(self, scope):
+        try:
+            names = scope.names_dict[str(self.name_str)]
+        except KeyError:
+            return []
+        return [name for name in names if name.is_definition()]
+
     def filter_name(self, scope_names_generator, search_global=False):
         """
         Filters all variables of a scope (which are defined in the
@@ -76,7 +83,12 @@ class NameFinder(object):
         names = []
         self.maybe_descriptor = isinstance(self.scope, er.Class)
         for name_list_scope, name_list in scope_names_generator:
-            print(name_list_scope, hasattr(name_list_scope, 'names_dict'))
+            if hasattr(name_list_scope, 'names_dict'):
+                names = self.names_dict_lookup(name_list_scope)
+                if names:
+                    break
+                continue
+
             break_scopes = []
             if not isinstance(name_list_scope, compiled.CompiledObject):
                 # Here is the position stuff happening (sorting of variables).
diff --git a/jedi/parser/__init__.py b/jedi/parser/__init__.py
index 6518e851..f16ff9e0 100644
--- a/jedi/parser/__init__.py
+++ b/jedi/parser/__init__.py
@@ -60,7 +60,8 @@ class Parser(object):
         self.used_names = {}
         self.scope_names_stack = [{}]
         logger = logging.getLogger("Jedi-Parser")
-        d = Driver(pytree.python_grammar, convert=self.convert, logger=logger)
+        d = Driver(pytree.python_grammar, self.convert,
+                   self.error_recovery, logger=logger)
         self.module = d.parse_string(source).get_parent_until()
         self.module.used_names = self.used_names
 
@@ -96,6 +97,37 @@ class Parser(object):
             new_node.names_dict = scope_names
         return new_node
 
+    def error_recovery(self, grammar, stack, type, value):
+        """
+        This parser is written in a dynamic way, meaning that this parser
+        allows using different grammars (even non-Python). However, error
+        recovery is purely written for Python.
+        """
+        if value == '\n':  # Statement is not finished.
+            # Now remove the whole statement.
+            for i, (dfa, state, node) in reversed(list(enumerate(stack))):
+                symbol, _, _, _ = node
+
+                # `suite` can sometimes be only simple_stmt, not stmt.
+                if symbol in (grammar.symbol2number['simple_stmt'],
+                              grammar.symbol2number['stmt']):
+                    index = i
+            stack[index:] = []
+        else:
+            # For now just discard everything that is not a suite or
+            # file_input, if we detect an error.
+            for i, (dfa, state, node) in reversed(list(enumerate(stack))):
+                symbol, _, _, _ = node
+
+                # `suite` can sometimes be only simple_stmt, not stmt.
+                if symbol in (grammar.symbol2number['file_input'],
+                              grammar.symbol2number['suite']):
+                    index = i
+                    break
+            stack[index + 1:] = []
+        # No success finding a transition
+        #raise ParseError("bad input", type, value, context)
+
     def __init__old__(self, source, module_path=None, no_docstr=False,
                       tokenizer=None, top_module=None):
         self.no_docstr = no_docstr
diff --git a/jedi/parser/pgen2/__init__.py b/jedi/parser/pgen2/__init__.py
index b53c7a4c..6e5458f8 100644
--- a/jedi/parser/pgen2/__init__.py
+++ b/jedi/parser/pgen2/__init__.py
@@ -21,17 +21,18 @@ from . import tokenize
 
 class Driver(object):
 
-    def __init__(self, grammar, convert=None, logger=None):
+    def __init__(self, grammar, convert, error_recovery, logger=None):
         self.grammar = grammar
         if logger is None:
             logger = logging.getLogger()
         self.logger = logger
         self.convert = convert
+        self.error_recovery = error_recovery
 
     def parse_tokens(self, tokens):
         """Parse a series of tokens and return the syntax tree."""
         # XXX Move the prefix computation into a wrapper around tokenize.
-        p = parse.Parser(self.grammar, self.convert)
+        p = parse.Parser(self.grammar, self.convert, self.error_recovery)
         lineno = 1
         column = 0
         type = value = start = end = line_text = None
diff --git a/jedi/parser/pgen2/parse.py b/jedi/parser/pgen2/parse.py
index b81630cb..7a533890 100644
--- a/jedi/parser/pgen2/parse.py
+++ b/jedi/parser/pgen2/parse.py
@@ -56,7 +56,7 @@ class Parser(object):
 
     """
 
-    def __init__(self, grammar, convert=None):
+    def __init__(self, grammar, convert, error_recovery):
         """Constructor.
 
         The grammar argument is a grammar.Grammar instance; see the
@@ -97,6 +97,7 @@ class Parser(object):
         stackentry = (self.grammar.dfas[start], 0, newnode)
         self.stack = [stackentry]
         self.rootnode = None
+        self.error_recovery = error_recovery
 
     def addtoken(self, type, value, context):
         """Add a token; return True iff this is the end of the program."""
@@ -143,7 +144,7 @@ class Parser(object):
                         raise ParseError("too much input",
                                          type, value, context)
                 else:
-                    self.error_recovery(type, value, context)
+                    self.error_recovery(self.grammar, self.stack, type, value)
                     break
 
     def classify(self, type, value, context):
@@ -184,34 +185,3 @@ class Parser(object):
                 node[-1].append(newnode)
             else:
                 self.rootnode = newnode
-
-    def error_recovery(self, type, value, context):
-        """
-        This parser is written in a dynamic way, meaning that this parser
-        allows using different grammars (even non-Python). However, error
-        recovery is purely written for Python.
-        """
-        if value == '\n':  # Statement is not finished.
-            # Now remove the whole statement.
-            for i, (dfa, state, node) in reversed(list(enumerate(self.stack))):
-                symbol, _, _, _ = node
-
-                # `suite` can sometimes be only simple_stmt, not stmt.
-                if symbol in (self.grammar.symbol2number['simple_stmt'],
-                              self.grammar.symbol2number['stmt']):
-                    index = i
-            self.stack[index:] = []
-        else:
-            # For now just discard everything that is not a suite or
-            # file_input, if we detect an error.
-            for i, (dfa, state, node) in reversed(list(enumerate(self.stack))):
-                symbol, _, _, _ = node
-
-                # `suite` can sometimes be only simple_stmt, not stmt.
-                if symbol in (self.grammar.symbol2number['file_input'],
-                              self.grammar.symbol2number['suite']):
-                    index = i
-                    break
-            self.stack[index + 1:] = []
-        # No success finding a transition
-        #raise ParseError("bad input", type, value, context)
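Note: with this change, pgen2's Parser no longer carries Python-specific recovery itself. Whenever addtoken() fails to find a transition, it now calls the injected callback as error_recovery(grammar, stack, type, value), and the Python-aware recovery lives in jedi/parser/__init__.py. A minimal stand-alone sketch of such a callback (hypothetical, for illustration only; it is not part of this patch) that prunes the DFA stack back to the innermost `suite` or `file_input` node, assuming lib2to3-style stack entries of the form (dfa, state, (symbol, value, context, children)):

    def minimal_error_recovery(grammar, stack, type, value):
        # Cut everything above the innermost suite/file_input node so that
        # parsing can resume at block level after a bad token.
        block_symbols = (grammar.symbol2number['suite'],
                         grammar.symbol2number['file_input'])
        for i, (dfa, state, node) in reversed(list(enumerate(stack))):
            if node[0] in block_symbols:
                del stack[i + 1:]
                return
        # Nothing to recover to; give up silently, mirroring the
        # commented-out ParseError above.

Such a callback would be wired in the same way as self.error_recovery in the jedi/parser/__init__.py hunk, e.g. Driver(pytree.python_grammar, convert, minimal_error_recovery, logger=logger).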