Don't start parsing in our own API.

Dave Halter
2017-03-20 08:44:52 +01:00
parent 53b4e78a9b
commit 9dedb9ff68
4 changed files with 10 additions and 9 deletions
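
In short: after this change the parser is constructed idle (start_parsing=False), and parsing only happens once the token stream is handed to parse(tokens=...), replacing the old tokenizer= keyword. A minimal sketch of the new calling convention, assuming grammar, code and token_stream are already in scope (placeholder names, not from this diff):

    # construct the parser without triggering a parse
    p = ParserWithRecovery(grammar, code, start_parsing=False)
    # parsing starts only here, with the tokens passed in explicitly
    module = p.parse(tokens=token_stream)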

@@ -134,7 +134,7 @@ def get_stack_at_position(grammar, code_lines, module_node, pos):
     p = parser.ParserWithRecovery(grammar, code, start_parsing=False)
     try:
-        p.parse(tokenizer=tokenize_without_endmarker(code))
+        p.parse(tokens=tokenize_without_endmarker(code))
     except EndMarkerReached:
         return Stack(p.pgen_parser.stack)
     raise SystemError("This really shouldn't happen. There's a bug in Jedi.")

@@ -323,7 +323,7 @@ class DiffParser(object):
         parsed_until_line = self._nodes_stack.parsed_until_line
         lines_after = self._parser_lines_new[parsed_until_line:]
         #print('parse_content', parsed_until_line, lines_after, until_line)
-        tokenizer = self._diff_tokenize(
+        tokens = self._diff_tokenize(
             lines_after,
             until_line,
             line_offset=parsed_until_line
@@ -333,7 +333,7 @@ class DiffParser(object):
             source='\n',
             start_parsing=False
         )
-        return self._active_parser.parse(tokenizer=tokenizer)
+        return self._active_parser.parse(tokens=tokens)

     def _diff_tokenize(self, lines, until_line, line_offset=0):
         is_first_token = True

@@ -79,8 +79,9 @@ def parse(code, grammar=None, error_recovery=True, start_symbol='file_input'):
     else:
         kwargs = dict(start_symbol=start_symbol)
         parser = Parser

-    p = parser(grammar, code, tokens=tokens, **kwargs)
+    p = parser(grammar, code, start_parsing=False, **kwargs)
+    module = p.parse(tokens=tokens)
     if added_newline:
         p._remove_last_newline()
-    return p.get_root_node()
+    return module

@@ -173,8 +173,8 @@ class ParserWithRecovery(Parser):
             start_parsing=start_parsing
         )

-    def parse(self, tokenizer):
-        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokenizer))
+    def parse(self, tokens):
+        root_node = super(ParserWithRecovery, self).parse(self._tokenize(tokens))
         root_node.path = self._module_path
         return root_node
@@ -242,8 +242,8 @@ class ParserWithRecovery(Parser):
         stack[start_index:] = []
         return failed_stack

-    def _tokenize(self, tokenizer):
-        for typ, value, start_pos, prefix in tokenizer:
+    def _tokenize(self, tokens):
+        for typ, value, start_pos, prefix in tokens:
             # print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
             if typ == DEDENT:
                 # We need to count indents, because if we just omit any DEDENT,