Fix an issue with omitted dedents in the parser.
@@ -113,8 +113,9 @@ class Parser(object):
             'lambdef_nocond': pt.Lambda,
         }
 
-        self.global_names = []
-        self._omit_dedent = 0
+        self._global_names = []
+        self._omit_dedent_list = []
+        self._indent_counter = 0
         self._last_failed_start_pos = (0, 0)
 
         # TODO do print absolute import detection here.
@@ -126,9 +127,9 @@ class Parser(object):
         #if self.options["print_function"]:
         # python_grammar = pygram.python_grammar_no_print_statement
         #else:
-        self.used_names = {}
-        self.scope_names_stack = [{}]
-        self.error_statement_stacks = []
+        self._used_names = {}
+        self._scope_names_stack = [{}]
+        self._error_statement_stacks = []
 
         added_newline = False
         # The Python grammar needs a newline at the end of each statement.
@@ -145,11 +146,10 @@ class Parser(object):
 
         if added_newline:
             self.remove_last_newline()
-        self.module.used_names = self.used_names
+        self.module.used_names = self._used_names
         self.module.path = module_path
-        self.module.global_names = self.global_names
-        self.module.error_statement_stacks = self.error_statement_stacks
-        self.grammar_symbols = grammar.number2symbol
+        self.module.global_names = self._global_names
+        self.module.error_statement_stacks = self._error_statement_stacks
 
     def convert_node(self, grammar, type, children):
         """
@@ -168,25 +168,25 @@ class Parser(object):
         # We need to check raw_node always, because the same node can be
         # returned by convert multiple times.
         if symbol == 'global_stmt':
-            self.global_names += new_node.get_global_names()
+            self._global_names += new_node.get_global_names()
         elif isinstance(new_node, pt.Lambda):
-            new_node.names_dict = self.scope_names_stack.pop()
+            new_node.names_dict = self._scope_names_stack.pop()
         elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \
                 and symbol in ('funcdef', 'classdef', 'file_input'):
             # scope_name_stack handling
-            scope_names = self.scope_names_stack.pop()
+            scope_names = self._scope_names_stack.pop()
             if isinstance(new_node, pt.ClassOrFunc):
                 n = new_node.name
                 scope_names[n.value].remove(n)
                 # Set the func name of the current node
-                arr = self.scope_names_stack[-1].setdefault(n.value, [])
+                arr = self._scope_names_stack[-1].setdefault(n.value, [])
                 arr.append(n)
             new_node.names_dict = scope_names
         elif isinstance(new_node, pt.CompFor):
             # The name definitions of comprehensions shouldn't be part of the
             # current scope. They are part of the comprehension scope.
             for n in new_node.get_defined_names():
-                self.scope_names_stack[-1][n.value].remove(n)
+                self._scope_names_stack[-1][n.value].remove(n)
         return new_node
 
     def convert_leaf(self, grammar, type, value, prefix, start_pos):
@@ -194,15 +194,15 @@ class Parser(object):
         if type == tokenize.NAME:
             if value in grammar.keywords:
                 if value in ('def', 'class', 'lambda'):
-                    self.scope_names_stack.append({})
+                    self._scope_names_stack.append({})
 
                 return pt.Keyword(self.position_modifier, value, start_pos, prefix)
             else:
                 name = pt.Name(self.position_modifier, value, start_pos, prefix)
                 # Keep a listing of all used names
-                arr = self.used_names.setdefault(name.value, [])
+                arr = self._used_names.setdefault(name.value, [])
                 arr.append(name)
-                arr = self.scope_names_stack[-1].setdefault(name.value, [])
+                arr = self._scope_names_stack[-1].setdefault(name.value, [])
                 arr.append(name)
                 return name
         elif type == token.STRING:
@@ -254,7 +254,7 @@ class Parser(object):
         if typ == token.INDENT:
             # For every deleted INDENT we have to delete a DEDENT as well.
             # Otherwise the parser will get into trouble and DEDENT too early.
-            self._omit_dedent += 1
+            self._omit_dedent_list.append(self._indent_counter)
 
         if value in ('import', 'from', 'class', 'def', 'try', 'while', 'return'):
             # Those can always be new statements.
@@ -279,8 +279,8 @@ class Parser(object):
                 except AttributeError:
                     if isinstance(c, pt.Name):
                         try:
-                            self.scope_names_stack[-1][c.value].remove(c)
-                            self.used_names[c.value].remove(c)
+                            self._scope_names_stack[-1][c.value].remove(c)
+                            self._used_names[c.value].remove(c)
                         except ValueError:
                             pass  # This may happen with CompFor.
 
@@ -296,10 +296,10 @@ class Parser(object):
                 symbol = grammar.number2symbol[typ]
                 failed_stack.append((symbol, nodes))
                 if nodes and nodes[0] in ('def', 'class', 'lambda'):
-                    self.scope_names_stack.pop()
+                    self._scope_names_stack.pop()
         if failed_stack:
             err = ErrorStatement(failed_stack, value, self.position_modifier, start_pos)
-            self.error_statement_stacks.append(err)
+            self._error_statement_stacks.append(err)
 
         self._last_failed_start_pos = start_pos
 
@@ -308,10 +308,18 @@ class Parser(object):
     def _tokenize(self, tokenizer):
         for typ, value, start_pos, prefix in tokenizer:
             #print(token.tok_name[typ], repr(value), start_pos, repr(prefix))
-            if self._omit_dedent and typ == token.DEDENT:
-                self._omit_dedent -= 1
-                continue
+            if typ == token.DEDENT:
+                # We need to count indents, because if we just omit any DEDENT,
+                # we might omit them in the wrong place.
+                o = self._omit_dedent_list
+                if o and o[-1] == self._indent_counter:
+                    o.pop()
+                    continue
 
+                self._indent_counter -= 1
+            elif typ == token.INDENT:
+                self._indent_counter += 1
+
             if typ == token.OP:
                 typ = token.opmap[value]
             yield typ, value, prefix, start_pos
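The heart of the fix is the bookkeeping above: error recovery records the indent level at which it discarded an INDENT, and `_tokenize` later drops exactly the DEDENT that closes that level. A minimal standalone sketch of the mechanism (the token stream and the names `filter_dedents`, `tokens` are invented for illustration, not jedi's API):

INDENT, DEDENT, NAME = 'INDENT', 'DEDENT', 'NAME'

def filter_dedents(tokens, omit_dedent_list):
    # `omit_dedent_list` holds the indent levels at which an INDENT was
    # discarded; only the DEDENT closing *that* level is skipped, rather
    # than whichever DEDENT happens to arrive next.
    indent_counter = 0
    for typ, value in tokens:
        if typ == DEDENT:
            if omit_dedent_list and omit_dedent_list[-1] == indent_counter:
                omit_dedent_list.pop()
                continue  # pairs with a previously deleted INDENT
            indent_counter -= 1
        elif typ == INDENT:
            indent_counter += 1
        yield typ, value

# Suppose error recovery discarded the outer INDENT (recorded at level 1).
# A plain `_omit_dedent` counter would drop the *inner* DEDENT instead.
tokens = [(INDENT, ''), (NAME, 'a'), (INDENT, ''), (NAME, 'b'),
          (DEDENT, ''), (DEDENT, '')]
assert [t for t, _ in filter_dedents(iter(tokens), [1])].count(DEDENT) == 1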
@@ -627,10 +627,11 @@ class FastTokenizer(object):
         # information. However we care about "lost" lines. The prefix of
         # the current line (indent) will always be included in the current
         # line.
-        if self.current[0] == DEDENT:
-            prefix = next(self._gen)[3]
-        else:
-            prefix = self.current[3]
+        cur = self.current
+        while cur[0] == DEDENT:
+            cur = next(self._gen)
+        prefix = cur[3]
+
         # \Z for the end of the string. $ is bugged, because it has the
         # same behavior with or without re.MULTILINE.
         prefix = re.sub(r'[^\n]+\Z', '', prefix)
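The FastTokenizer change is the same problem seen from the other side: DEDENT tokens carry no usable prefix, and several can arrive back to back when a block closes multiple indentation levels at once, so the old if/else, which skipped at most one, could pick up the wrong prefix. A sketch of the loop's intent (token tuples assumed to be (typ, value, start_pos, prefix), as elsewhere in this diff; the helper name is hypothetical):

def first_real_prefix(current, gen):
    # Skip every consecutive DEDENT and take the prefix of the first
    # real token behind them.
    cur = current
    while cur[0] == 'DEDENT':  # stand-in for the real DEDENT constant
        cur = next(gen)
    return cur[3]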
@@ -242,6 +242,21 @@ def test_one_statement_func():
     cache.parser_cache.pop(None, None)
     check_fp(src + 'def second():\n a', 3)
 
+
+def test_class_func_if():
+    src = dedent("""\
+    class Class:
+        def func(self):
+            if 1:
+                a
+            else:
+                b
+
+    pass
+    """)
+    check_fp(src, 3)
+
+
 def test_wrong_indentation():
     src = dedent("""\
     def func():
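The new test pins the regression down: an if/else suite inside a method, followed by a dedent all the way back to module level. As a standalone illustration (stdlib only, independent of jedi), Python's own tokenizer shows that this snippet ends in three consecutive DEDENTs before `pass`, exactly the run the indent-level bookkeeping has to place correctly:

import io
import tokenize
from textwrap import dedent

src = dedent("""\
class Class:
    def func(self):
        if 1:
            a
        else:
            b

pass
""")
toks = tokenize.generate_tokens(io.StringIO(src).readline)
print([tokenize.tok_name[t.type] for t in toks
       if t.type in (tokenize.INDENT, tokenize.DEDENT)])
# -> ['INDENT', 'INDENT', 'INDENT', 'DEDENT', 'INDENT',
#     'DEDENT', 'DEDENT', 'DEDENT']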